author     Michaël Zasso <targos@protonmail.com>  2017-12-05 16:41:55 +0100
committer  Michaël Zasso <targos@protonmail.com>  2017-12-06 12:52:07 +0100
commit     1854ba04e9a68f062beb299dd6e1479279b26363 (patch)
tree       d5b2df9b8c1deb6388f7a728fca8e1c98c779abe /deps/v8/src/heap
parent     b52c23b75f96e1c9d2c7b3a7e5619170d0a0d8e1 (diff)
deps: update V8 to 6.3.292.46
PR-URL: https://github.com/nodejs/node/pull/16271
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/barrier.h                    |  77
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc        | 171
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h         |  19
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc      |   2
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc                 |   8
-rw-r--r--  deps/v8/src/heap/heap-inl.h                   | 117
-rw-r--r--  deps/v8/src/heap/heap.cc                      | 903
-rw-r--r--  deps/v8/src/heap/heap.h                       |  64
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc       | 103
-rw-r--r--  deps/v8/src/heap/incremental-marking.h        | 117
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h           |  19
-rw-r--r--  deps/v8/src/heap/mark-compact.cc              | 271
-rw-r--r--  deps/v8/src/heap/mark-compact.h               |  85
-rw-r--r--  deps/v8/src/heap/marking.h                    |   7
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc            |   2
-rw-r--r--  deps/v8/src/heap/object-stats.cc              |  23
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h       |  24
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc          |  28
-rw-r--r--  deps/v8/src/heap/objects-visiting.h           |   3
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h              |   6
-rw-r--r--  deps/v8/src/heap/scavenger.cc                 |  23
-rw-r--r--  deps/v8/src/heap/scavenger.h                  |  56
-rw-r--r--  deps/v8/src/heap/sequential-marking-deque.cc  | 100
-rw-r--r--  deps/v8/src/heap/sequential-marking-deque.h   | 156
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc       | 629
-rw-r--r--  deps/v8/src/heap/spaces-inl.h                 | 122
-rw-r--r--  deps/v8/src/heap/spaces.cc                    | 298
-rw-r--r--  deps/v8/src/heap/spaces.h                     | 150
-rw-r--r--  deps/v8/src/heap/store-buffer.cc              |   4
-rw-r--r--  deps/v8/src/heap/store-buffer.h               |   2
-rw-r--r--  deps/v8/src/heap/worklist.h                   |  28
31 files changed, 1716 insertions(+), 1901 deletions(-)
diff --git a/deps/v8/src/heap/barrier.h b/deps/v8/src/heap/barrier.h
new file mode 100644
index 0000000000..d945a83d90
--- /dev/null
+++ b/deps/v8/src/heap/barrier.h
@@ -0,0 +1,77 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BARRIER_H_
+#define V8_HEAP_BARRIER_H_
+
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+// Barrier that can be used once to synchronize a dynamic number of tasks
+// working concurrently.
+//
+// Usage:
+// void RunConcurrently(OneshotBarrier* shared_barrier) {
+// shared_barrier->Start();
+// do {
+// {
+// /* process work and create new work */
+// shared_barrier->NotifyAll();
+// /* process work and create new work */
+// }
+// } while(!shared_barrier->Wait());
+// }
+//
+// Note: If Start() is not called in time, e.g., because the first concurrent
+// task is already done processing all work, then Wait() will return true
+// immediately.
+class OneshotBarrier {
+ public:
+ OneshotBarrier() : tasks_(0), waiting_(0), done_(false) {}
+
+ void Start() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ tasks_++;
+ }
+
+ void NotifyAll() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (waiting_ > 0) condition_.NotifyAll();
+ }
+
+ bool Wait() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (done_) return true;
+
+ DCHECK_LE(waiting_, tasks_);
+ waiting_++;
+ if (waiting_ == tasks_) {
+ done_ = true;
+ condition_.NotifyAll();
+ } else {
+ // Spurious wakeup is ok here.
+ condition_.Wait(&mutex_);
+ }
+ waiting_--;
+ return done_;
+ }
+
+ // Only valid to be called in a sequential setting.
+ bool DoneForTesting() const { return done_; }
+
+ private:
+ base::ConditionVariable condition_;
+ base::Mutex mutex_;
+ int tasks_;
+ int waiting_;
+ bool done_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BARRIER_H_
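
The usage comment in the new barrier.h above only sketches the intended protocol. Below is a minimal standalone illustration of the same one-shot barrier pattern, rebuilt on std::mutex/std::condition_variable instead of V8's base:: primitives; the class name, the shared work queue, and the worker loop are all illustrative and not part of this patch.

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Standalone re-statement of the Start()/NotifyAll()/Wait() protocol above.
class OneshotBarrierSketch {
 public:
  void Start() {  // each participating task announces itself once
    std::lock_guard<std::mutex> guard(mutex_);
    tasks_++;
  }
  void NotifyAll() {  // new work may have been created; wake any waiters
    std::lock_guard<std::mutex> guard(mutex_);
    if (waiting_ > 0) condition_.notify_all();
  }
  bool Wait() {  // returns true only once every registered task is waiting
    std::unique_lock<std::mutex> guard(mutex_);
    if (done_) return true;
    waiting_++;
    if (waiting_ == tasks_) {
      done_ = true;
      condition_.notify_all();
    } else {
      condition_.wait(guard);  // spurious wakeups are fine, caller loops
    }
    waiting_--;
    return done_;
  }

 private:
  std::condition_variable condition_;
  std::mutex mutex_;
  int tasks_ = 0;
  int waiting_ = 0;
  bool done_ = false;
};

int main() {
  OneshotBarrierSketch barrier;
  std::mutex queue_mutex;
  std::queue<int> work;  // hypothetical shared work queue
  for (int i = 0; i < 100; i++) work.push(i);

  auto worker = [&] {
    barrier.Start();
    do {
      for (;;) {  // drain whatever work is currently visible
        std::lock_guard<std::mutex> guard(queue_mutex);
        if (work.empty()) break;
        work.pop();
      }
      barrier.NotifyAll();  // other tasks may be parked in Wait()
    } while (!barrier.Wait());  // terminate only when every task is idle
  };

  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) threads.emplace_back(worker);
  for (auto& t : threads) t.join();
  return 0;
}

This is the same shape as the ScavengingTask::RunInParallel change further down in this commit: process, notify, then loop until Wait() reports that every task is done.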
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 95b3a230ab..60bcbe9bab 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -121,12 +121,6 @@ class ConcurrentMarkingVisitor final
int VisitJSApiObject(Map* map, JSObject* object) {
if (marking_state_.IsGrey(object)) {
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- // It is OK to iterate body of JS API object here because they do not have
- // unboxed double fields.
- DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
- JSObject::BodyDescriptor::IterateBody(object, size, this);
// The main thread will do wrapper tracing in Blink.
bailout_.Push(object);
}
@@ -134,6 +128,52 @@ class ConcurrentMarkingVisitor final
}
// ===========================================================================
+ // Strings with pointers =====================================================
+ // ===========================================================================
+
+ int VisitConsString(Map* map, ConsString* object) {
+ int size = ConsString::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ int VisitSlicedString(Map* map, SlicedString* object) {
+ int size = SlicedString::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ int VisitThinString(Map* map, ThinString* object) {
+ int size = ThinString::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ // ===========================================================================
+ // Strings without pointers ==================================================
+ // ===========================================================================
+
+ int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
+ int size = SeqOneByteString::SizeFor(object->synchronized_length());
+ if (!ShouldVisit(object)) return 0;
+ VisitMapPointer(object, object->map_slot());
+ return size;
+ }
+
+ int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
+ int size = SeqTwoByteString::SizeFor(object->synchronized_length());
+ if (!ShouldVisit(object)) return 0;
+ VisitMapPointer(object, object->map_slot());
+ return size;
+ }
+
+ // ===========================================================================
// Fixed array object ========================================================
// ===========================================================================
@@ -215,11 +255,12 @@ class ConcurrentMarkingVisitor final
}
int VisitTransitionArray(Map* map, TransitionArray* array) {
- if (marking_state_.IsGrey(array)) {
- // TODO(ulan): process transition arrays.
- bailout_.Push(array);
- }
- return 0;
+ if (!ShouldVisit(array)) return 0;
+ VisitMapPointer(array, array->map_slot());
+ int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
+ TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ weak_objects_->transition_arrays.Push(task_id_, array);
+ return size;
}
int VisitWeakCell(Map* map, WeakCell* object) {
@@ -283,13 +324,14 @@ class ConcurrentMarkingVisitor final
SlotSnapshot* slot_snapshot_;
};
- const SlotSnapshot& MakeSlotSnapshot(Map* map, HeapObject* object, int size) {
+ template <typename T>
+ const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
// TODO(ulan): Iterate only the existing fields and skip slack at the end
// of the object.
SlotSnapshottingVisitor visitor(&slot_snapshot_);
visitor.VisitPointer(object,
reinterpret_cast<Object**>(object->map_slot()));
- JSObject::BodyDescriptor::IterateBody(object, size, &visitor);
+ T::BodyDescriptor::IterateBody(object, size, &visitor);
return slot_snapshot_;
}
ConcurrentMarking::MarkingWorklist::View shared_;
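
The hunk above turns MakeSlotSnapshot into a template over the object's BodyDescriptor so the new cons/sliced/thin string visitors can reuse it. The underlying idea is to copy an object's pointer slots into task-local storage first and mark from that copy, so the concurrent marker never re-reads fields the main thread may be rewriting (e.g. when it transitions a string shape in place). A rough standalone sketch of that snapshot-then-visit pattern, with all types hypothetical:

#include <atomic>
#include <cstddef>
#include <vector>

// Illustrative only; the real code walks slots via V8's BodyDescriptors.
struct FakeObject {
  static constexpr int kSlotCount = 3;
  std::atomic<void*> slots[kSlotCount];  // fields the main thread may rewrite
};

class SlotSnapshot {
 public:
  void Add(void* value) { values_.push_back(value); }
  const std::vector<void*>& values() const { return values_; }

 private:
  std::vector<void*> values_;
};

// Step 1: copy every slot value into task-local storage in one pass.
SlotSnapshot MakeSlotSnapshot(const FakeObject* object) {
  SlotSnapshot snapshot;
  for (int i = 0; i < FakeObject::kSlotCount; i++) {
    snapshot.Add(object->slots[i].load(std::memory_order_relaxed));
  }
  return snapshot;
}

// Step 2: mark only from the snapshot; later in-place mutations of the object
// by the main thread cannot be observed here.
void VisitPointersInSnapshot(const SlotSnapshot& snapshot) {
  for (void* value : snapshot.values()) {
    if (value != nullptr) { /* push |value| onto a marking worklist */ }
  }
}

int main() {
  int a = 1, b = 2;
  FakeObject object;
  object.slots[0] = &a;
  object.slots[1] = &b;
  object.slots[2] = nullptr;
  VisitPointersInSnapshot(MakeSlotSnapshot(&object));
  return 0;
}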
@@ -325,17 +367,20 @@ class ConcurrentMarking::Task : public CancelableTask {
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout,
+ MarkingWorklist* on_hold,
WeakObjects* weak_objects)
: heap_(heap),
shared_(shared),
bailout_(bailout),
+ on_hold_(on_hold),
weak_objects_(weak_objects),
- pending_task_count_(0) {
+ pending_task_count_(0),
+ task_count_(0) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
- for (int i = 0; i <= kTasks; i++) {
+ for (int i = 0; i <= kMaxTasks; i++) {
is_pending_[i] = false;
}
}
@@ -351,7 +396,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
task_id);
double time_ms;
- size_t total_bytes_marked = 0;
+ size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Starting concurrent marking task %d\n", task_id);
@@ -361,9 +406,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
bool done = false;
while (!done) {
base::LockGuard<base::Mutex> guard(&task_state->lock);
- size_t bytes_marked = 0;
+ size_t current_marked_bytes = 0;
int objects_processed = 0;
- while (bytes_marked < kBytesUntilInterruptCheck &&
+ while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterrupCheck) {
HeapObject* object;
if (!shared_->Pop(task_id, &object)) {
@@ -375,13 +420,15 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address new_space_limit = heap_->new_space()->original_limit();
Address addr = object->address();
if (new_space_top <= addr && addr < new_space_limit) {
- bailout_->Push(task_id, object);
+ on_hold_->Push(task_id, object);
} else {
Map* map = object->synchronized_map();
- bytes_marked += visitor.Visit(map, object);
+ current_marked_bytes += visitor.Visit(map, object);
}
}
- total_bytes_marked += bytes_marked;
+ marked_bytes += current_marked_bytes;
+ base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
+ marked_bytes);
if (task_state->interrupt_request.Value()) {
task_state->interrupt_condition.Wait(&task_state->lock);
}
@@ -391,9 +438,12 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
// young generation GC.
base::LockGuard<base::Mutex> guard(&task_state->lock);
bailout_->FlushToGlobal(task_id);
+ on_hold_->FlushToGlobal(task_id);
}
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
+ base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
+ total_marked_bytes_.Increment(marked_bytes);
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
is_pending_[task_id] = false;
@@ -404,28 +454,34 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Task %d concurrently marked %dKB in %.2fms\n", task_id,
- static_cast<int>(total_bytes_marked / KB), time_ms);
+ static_cast<int>(marked_bytes / KB), time_ms);
}
}
void ConcurrentMarking::ScheduleTasks() {
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
- if (pending_task_count_ < kTasks) {
- // Task id 0 is for the main thread.
- for (int i = 1; i <= kTasks; i++) {
- if (!is_pending_[i]) {
- if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp(
- "Scheduling concurrent marking task %d\n", i);
- }
- task_state_[i].interrupt_request.SetValue(false);
- is_pending_[i] = true;
- ++pending_task_count_;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new Task(heap_->isolate(), this, &task_state_[i], i),
- v8::Platform::kShortRunningTask);
+ if (task_count_ == 0) {
+ // TODO(ulan): Increase the number of tasks for platforms that benefit
+ // from it.
+ task_count_ = static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() / 2);
+ task_count_ = Max(Min(task_count_, kMaxTasks), 1);
+ }
+ // Task id 0 is for the main thread.
+ for (int i = 1; i <= task_count_ && pending_task_count_ < task_count_; i++) {
+ if (!is_pending_[i]) {
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Scheduling concurrent marking task %d\n", i);
}
+ task_state_[i].interrupt_request.SetValue(false);
+ is_pending_[i] = true;
+ ++pending_task_count_;
+ Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
+ cancelable_id_[i] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
}
}
}
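
ScheduleTasks() above now derives the number of concurrent marking tasks from the platform instead of always using four: half the available background threads, clamped to the range [1, kMaxTasks]. A tiny worked sketch of that clamp (the thread counts below are made up):

#include <algorithm>
#include <cstdio>

// Mirrors: task_count_ = Max(Min(threads / 2, kMaxTasks), 1);
int ComputeTaskCount(int available_background_threads, int kMaxTasks = 4) {
  int task_count = available_background_threads / 2;
  return std::max(std::min(task_count, kMaxTasks), 1);
}

int main() {
  // 1 thread -> 1 task, 4 -> 2, 8 -> 4, 16 -> 4 (capped at kMaxTasks).
  const int samples[] = {1, 4, 8, 16};
  for (int threads : samples) {
    std::printf("%2d background threads -> %d marking tasks\n", threads,
                ComputeTaskCount(threads));
  }
  return 0;
}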
@@ -441,18 +497,40 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
}
+void ConcurrentMarking::WaitForTasks() {
+ if (!FLAG_concurrent_marking) return;
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ while (pending_task_count_ > 0) {
+ pending_condition_.Wait(&pending_lock_);
+ }
+}
+
void ConcurrentMarking::EnsureCompleted() {
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
+ CancelableTaskManager* task_manager =
+ heap_->isolate()->cancelable_task_manager();
+ for (int i = 1; i <= task_count_; i++) {
+ if (is_pending_[i]) {
+ if (task_manager->TryAbort(cancelable_id_[i]) ==
+ CancelableTaskManager::kTaskAborted) {
+ is_pending_[i] = false;
+ --pending_task_count_;
+ }
+ }
+ }
while (pending_task_count_ > 0) {
pending_condition_.Wait(&pending_lock_);
}
+ for (int i = 1; i <= task_count_; i++) {
+ DCHECK(!is_pending_[i]);
+ }
}
void ConcurrentMarking::FlushLiveBytes(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= task_count_; i++) {
LiveBytesMap& live_bytes = task_state_[i].live_bytes;
for (auto pair : live_bytes) {
// ClearLiveness sets the live bytes to zero.
@@ -463,32 +541,43 @@ void ConcurrentMarking::FlushLiveBytes(
}
live_bytes.clear();
}
+ total_marked_bytes_.SetValue(0);
}
void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= task_count_; i++) {
if (task_state_[i].live_bytes.count(chunk)) {
task_state_[i].live_bytes[chunk] = 0;
}
}
}
+size_t ConcurrentMarking::TotalMarkedBytes() {
+ size_t result = 0;
+ for (int i = 1; i <= task_count_; i++) {
+ result +=
+ base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
+ }
+ result += total_marked_bytes_.Value();
+ return result;
+}
+
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking) {
if (!FLAG_concurrent_marking) return;
// Request task_state for all tasks.
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= kMaxTasks; i++) {
concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
}
// Now take a lock to ensure that the tasks are waiting.
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= kMaxTasks; i++) {
concurrent_marking_->task_state_[i].lock.Lock();
}
}
ConcurrentMarking::PauseScope::~PauseScope() {
if (!FLAG_concurrent_marking) return;
- for (int i = kTasks; i >= 1; i--) {
+ for (int i = kMaxTasks; i >= 1; i--) {
concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
concurrent_marking_->task_state_[i].lock.Unlock();
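
Run() above now publishes each task's running byte count with a relaxed atomic store, folds it into total_marked_bytes_ when the task finishes, and TotalMarkedBytes() sums both so the main thread can read progress without taking the per-task locks. A hedged sketch of that accounting scheme, with std::atomic standing in for base::AsAtomicWord/base::AtomicNumber and all names illustrative:

#include <atomic>
#include <cstddef>

constexpr int kMaxTasks = 4;

struct TaskStateSketch {
  std::atomic<size_t> marked_bytes{0};  // live counter owned by one task
};

class MarkedBytesTracker {
 public:
  // Called by a marking task after each batch of work.
  void Publish(int task_id, size_t marked_bytes) {
    task_state_[task_id].marked_bytes.store(marked_bytes,
                                            std::memory_order_relaxed);
  }

  // Called once when a task finishes: reset the live counter and fold the
  // task's total into the global counter, mirroring the order in Run() above.
  void Finish(int task_id, size_t marked_bytes) {
    task_state_[task_id].marked_bytes.store(0, std::memory_order_relaxed);
    total_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
  }

  // Main-thread read: finished tasks plus an approximate view of running
  // ones. Relaxed ordering suffices because the caller only needs a rough
  // progress figure, not a synchronized snapshot.
  size_t TotalMarkedBytes() const {
    size_t result = total_marked_bytes_.load(std::memory_order_relaxed);
    for (int i = 1; i <= kMaxTasks; i++) {  // task id 0 is the main thread
      result += task_state_[i].marked_bytes.load(std::memory_order_relaxed);
    }
    return result;
  }

 private:
  TaskStateSketch task_state_[kMaxTasks + 1];
  std::atomic<size_t> total_marked_bytes_{0};
};

int main() {
  MarkedBytesTracker tracker;
  tracker.Publish(1, 64 * 1024);   // task 1 still running
  tracker.Finish(2, 128 * 1024);   // task 2 done
  return tracker.TotalMarkedBytes() == 192 * 1024 ? 0 : 1;
}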
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index aa73db3a6a..0f0c8bf992 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -36,13 +36,15 @@ class ConcurrentMarking {
ConcurrentMarking* concurrent_marking_;
};
- static const int kTasks = 4;
+ static const int kMaxTasks = 4;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
- MarkingWorklist* bailout, WeakObjects* weak_objects);
+ MarkingWorklist* bailout, MarkingWorklist* on_hold,
+ WeakObjects* weak_objects);
void ScheduleTasks();
+ void WaitForTasks();
void EnsureCompleted();
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
@@ -51,6 +53,10 @@ class ConcurrentMarking {
// scavenge and is going to be re-used.
void ClearLiveness(MemoryChunk* chunk);
+ int TaskCount() { return task_count_; }
+
+ size_t TotalMarkedBytes();
+
private:
struct TaskState {
// When the concurrent marking task has this lock, then objects in the
@@ -63,6 +69,7 @@ class ConcurrentMarking {
// flag is cleared by the main thread.
base::ConditionVariable interrupt_condition;
LiveBytesMap live_bytes;
+ size_t marked_bytes;
char cache_line_padding[64];
};
class Task;
@@ -70,12 +77,16 @@ class ConcurrentMarking {
Heap* heap_;
MarkingWorklist* shared_;
MarkingWorklist* bailout_;
+ MarkingWorklist* on_hold_;
WeakObjects* weak_objects_;
- TaskState task_state_[kTasks + 1];
+ TaskState task_state_[kMaxTasks + 1];
+ base::AtomicNumber<size_t> total_marked_bytes_;
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
int pending_task_count_;
- bool is_pending_[kTasks + 1];
+ bool is_pending_[kMaxTasks + 1];
+ CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1];
+ int task_count_;
};
} // namespace internal
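
The per-task TaskState above keeps its fields (including the new marked_bytes counter) followed by cache_line_padding[64] so that neighbouring entries of task_state_[] do not share a cache line while different tasks update them. A small sketch of that false-sharing precaution in isolation; the 64-byte line size is an assumption, typical for x86-64:

#include <atomic>
#include <cstddef>

constexpr std::size_t kCacheLineSize = 64;  // assumption; typical on x86-64

// Manual trailing padding, in the spirit of TaskState::cache_line_padding.
struct PaddedCounter {
  std::atomic<std::size_t> value{0};
  char padding[kCacheLineSize - sizeof(std::atomic<std::size_t>)];
};

// The same effect via alignment, without computing the pad size by hand.
struct alignas(kCacheLineSize) AlignedCounter {
  std::atomic<std::size_t> value{0};
};

static_assert(sizeof(PaddedCounter) >= kCacheLineSize, "one line per counter");
static_assert(alignof(AlignedCounter) == kCacheLineSize, "line-aligned");

int main() {
  PaddedCounter counters[4];  // e.g. one per concurrent marking task
  counters[0].value.fetch_add(1, std::memory_order_relaxed);
  counters[3].value.fetch_add(1, std::memory_order_relaxed);
  return 0;
}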
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 62c3313924..1e10e81ddb 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -47,7 +47,7 @@ void GCIdleTimeHeapState::Print() {
size_t GCIdleTimeHandler::EstimateMarkingStepSize(
double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
- DCHECK(idle_time_in_ms > 0);
+ DCHECK_LT(0, idle_time_in_ms);
if (marking_speed_in_bytes_per_ms == 0) {
marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index e14fbb4862..7bfe0adfa0 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -234,7 +234,7 @@ void GCTracer::Stop(GarbageCollector collector) {
return;
}
- DCHECK(start_counter_ >= 0);
+ DCHECK_LE(0, start_counter_);
DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
(collector == MINOR_MARK_COMPACTOR &&
current_.type == Event::MINOR_MARK_COMPACTOR) ||
@@ -462,8 +462,8 @@ void GCTracer::PrintNVP() const {
"scavenge=%.2f "
"scavenge.roots=%.2f "
"scavenge.weak=%.2f "
- "scavenge.weak_global_handles.identify=%.2f"
- "scavenge.weak_global_handles.process=%.2f"
+ "scavenge.weak_global_handles.identify=%.2f "
+ "scavenge.weak_global_handles.process=%.2f "
"scavenge.parallel=%.2f "
"incremental.steps_count=%d "
"incremental.steps_took=%.1f "
@@ -608,6 +608,7 @@ void GCTracer::PrintNVP() const {
"mark=%.1f "
"mark.finish_incremental=%.1f "
"mark.roots=%.1f "
+ "mark.main=%.1f "
"mark.weak_closure=%.1f "
"mark.weak_closure.ephemeral=%.1f "
"mark.weak_closure.weak_handles=%.1f "
@@ -694,6 +695,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
current_.scopes[Scope::MC_MARK_ROOTS],
+ current_.scopes[Scope::MC_MARK_MAIN],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 49d69c80e2..a966fa03d8 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -18,7 +18,6 @@
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/isolate.h"
-#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
@@ -26,6 +25,7 @@
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@@ -381,10 +381,6 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
-bool Heap::DeoptMaybeTenuredAllocationSites() {
- return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
-}
-
bool Heap::InNewSpace(Object* object) {
// Inlined check from NewSpace::Contains.
bool result =
@@ -542,25 +538,12 @@ void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
(*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
}
-
-void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
- global_pretenuring_feedback_.erase(site);
-}
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}
-void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
- old_space_strings_.reserve(old_space_strings_.size() +
- new_space_strings_.size());
- std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
- std::back_inserter(old_space_strings_));
- new_space_strings_.clear();
-}
-
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
if (heap_->InNewSpace(string)) {
@@ -570,46 +553,6 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
- if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
- new_space_strings_.data() + new_space_strings_.size());
- }
-}
-
-void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
- IterateNewSpaceStrings(v);
- if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
- old_space_strings_.data() + old_space_strings_.size());
- }
-}
-
-
-// Verify() is inline to avoid ifdef-s around its calls in release
-// mode.
-void Heap::ExternalStringTable::Verify() {
-#ifdef DEBUG
- for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object* obj = Object::cast(new_space_strings_[i]);
- DCHECK(heap_->InNewSpace(obj));
- DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
- for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object* obj = Object::cast(old_space_strings_[i]);
- DCHECK(!heap_->InNewSpace(obj));
- DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
-#endif
-}
-
-
-void Heap::ExternalStringTable::AddOldString(String* string) {
- DCHECK(string->IsExternalString());
- DCHECK(!heap_->InNewSpace(string));
- old_space_strings_.push_back(string);
-}
-
Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
}
@@ -632,70 +575,12 @@ int Heap::NextScriptId() {
return last_id;
}
-void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
- set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
- // TODO(tebbi): Remove second half of DCHECK once
- // FLAG_harmony_restrict_constructor_return is gone.
- DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero ||
- construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset));
- set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
- // TODO(tebbi): Remove second half of DCHECK once
- // FLAG_harmony_restrict_constructor_return is gone.
- DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero ||
- construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset));
- set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
- set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
- set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
- DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
- set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
-}
-
int Heap::GetNextTemplateSerialNumber() {
int next_serial_number = next_template_serial_number()->value() + 1;
set_next_template_serial_number(Smi::FromInt(next_serial_number));
return next_serial_number;
}
-void Heap::SetSerializedTemplates(FixedArray* templates) {
- DCHECK_EQ(empty_fixed_array(), serialized_templates());
- DCHECK(isolate()->serializer_enabled());
- set_serialized_templates(templates);
-}
-
-void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
- DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
- DCHECK(isolate()->serializer_enabled());
- set_serialized_global_proxy_sizes(sizes);
-}
-
-void Heap::CreateObjectStats() {
- if (V8_LIKELY(FLAG_gc_stats == 0)) return;
- if (!live_object_stats_) {
- live_object_stats_ = new ObjectStats(this);
- }
- if (!dead_object_stats_) {
- dead_object_stats_ = new ObjectStats(this);
- }
-}
-
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_.Increment(1);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index b13ec784f5..458c6c7e09 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -24,6 +24,7 @@
#include "src/feedback-vector.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/barrier.h"
#include "src/heap/code-stats.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
@@ -50,6 +51,8 @@
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -58,6 +61,54 @@
namespace v8 {
namespace internal {
+void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
+ set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
+ // TODO(tebbi): Remove second half of DCHECK once
+ // FLAG_harmony_restrict_constructor_return is gone.
+ DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero ||
+ construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset));
+ set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
+ // TODO(tebbi): Remove second half of DCHECK once
+ // FLAG_harmony_restrict_constructor_return is gone.
+ DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero ||
+ construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset));
+ set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
+ set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
+ set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
+ set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetSerializedTemplates(FixedArray* templates) {
+ DCHECK_EQ(empty_fixed_array(), serialized_templates());
+ DCHECK(isolate()->serializer_enabled());
+ set_serialized_templates(templates);
+}
+
+void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+ DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
+ DCHECK(isolate()->serializer_enabled());
+ set_serialized_global_proxy_sizes(sizes);
+}
+
bool Heap::GCCallbackTuple::operator==(
const Heap::GCCallbackTuple& other) const {
return other.callback == callback && other.data == data;
@@ -183,7 +234,7 @@ Heap::Heap()
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
// Ensure old_generation_size_ is a multiple of kPageSize.
- DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
+ DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
@@ -703,6 +754,14 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
}
} // namespace
+void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
+ global_pretenuring_feedback_.erase(site);
+}
+
+bool Heap::DeoptMaybeTenuredAllocationSites() {
+ return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+}
+
void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
@@ -832,18 +891,6 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
- (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
- (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_code_space()->AddSample(
- static_cast<int>((code_space()->CommittedMemory() * 100.0) /
- CommittedMemory()));
- isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
- (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
- (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
@@ -888,9 +935,6 @@ void Heap::GarbageCollectionEpilogue() {
ReportStatisticsAfterGC();
#endif // DEBUG
- // Remember the last top pointer so that we can later find out
- // whether we allocated in new space since the last GC.
- new_space_top_after_last_gc_ = new_space()->top();
last_gc_time_ = MonotonicallyIncreasingTimeInMs();
{
@@ -1335,8 +1379,11 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
if (space == MAP_SPACE) {
// We allocate each map individually to avoid fragmentation.
maps->clear();
- DCHECK_EQ(1, reservation->size());
- int num_maps = reservation->at(0).size / Map::kSize;
+ DCHECK_LE(reservation->size(), 2);
+ int reserved_size = 0;
+ for (const Chunk& c : *reservation) reserved_size += c.size;
+ DCHECK_EQ(0, reserved_size % Map::kSize);
+ int num_maps = reserved_size / Map::kSize;
for (int i = 0; i < num_maps; i++) {
// The deserializer will update the skip list.
AllocationResult allocation = map_space()->AllocateRawUnaligned(
@@ -1356,8 +1403,10 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
}
} else if (space == LO_SPACE) {
// Just check that we can allocate during deserialization.
- DCHECK_EQ(1, reservation->size());
- perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
+ DCHECK_LE(reservation->size(), 2);
+ int reserved_size = 0;
+ for (const Chunk& c : *reservation) reserved_size += c.size;
+ perform_gc = !CanExpandOldGeneration(reserved_size);
} else {
for (auto& chunk : *reservation) {
AllocationResult allocation;
@@ -1379,7 +1428,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
- DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
+ DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
+ space);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -1667,8 +1717,6 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
-
- mark_compact_collector()->marking_worklist()->StopUsing();
}
@@ -1787,7 +1835,7 @@ class ScavengingItem : public ItemParallelJob::Item {
class ScavengingTask final : public ItemParallelJob::Task {
public:
- ScavengingTask(Heap* heap, Scavenger* scavenger, Scavenger::Barrier* barrier)
+ ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
: ItemParallelJob::Task(heap->isolate()),
heap_(heap),
scavenger_(scavenger),
@@ -1803,10 +1851,9 @@ class ScavengingTask final : public ItemParallelJob::Task {
item->Process(scavenger_);
item->MarkFinished();
}
- while (!barrier_->Done()) {
+ do {
scavenger_->Process(barrier_);
- barrier_->Wait();
- }
+ } while (!barrier_->Wait());
scavenger_->Process();
}
if (FLAG_trace_parallel_scavenge) {
@@ -1820,7 +1867,7 @@ class ScavengingTask final : public ItemParallelJob::Task {
private:
Heap* const heap_;
Scavenger* const scavenger_;
- Scavenger::Barrier* const barrier_;
+ OneshotBarrier* const barrier_;
};
class PageScavengingItem final : public ScavengingItem {
@@ -1909,9 +1956,9 @@ void Heap::Scavenge() {
Scavenger* scavengers[kMaxScavengerTasks];
const bool is_logging = IsLogging(isolate());
const int num_scavenge_tasks = NumberOfScavengeTasks();
- Scavenger::Barrier barrier;
- CopiedList copied_list(num_scavenge_tasks);
- PromotionList promotion_list(num_scavenge_tasks);
+ OneshotBarrier barrier;
+ Scavenger::CopiedList copied_list(num_scavenge_tasks);
+ Scavenger::PromotionList promotion_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] =
new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
@@ -2044,6 +2091,21 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
return string->IsExternalString() ? string : nullptr;
}
+void Heap::ExternalStringTable::Verify() {
+#ifdef DEBUG
+ for (size_t i = 0; i < new_space_strings_.size(); ++i) {
+ Object* obj = Object::cast(new_space_strings_[i]);
+ DCHECK(heap_->InNewSpace(obj));
+ DCHECK(!obj->IsTheHole(heap_->isolate()));
+ }
+ for (size_t i = 0; i < old_space_strings_.size(); ++i) {
+ Object* obj = Object::cast(old_space_strings_[i]);
+ DCHECK(!heap_->InNewSpace(obj));
+ DCHECK(!obj->IsTheHole(heap_->isolate()));
+ }
+#endif
+}
+
void Heap::ExternalStringTable::UpdateNewSpaceReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
if (new_space_strings_.empty()) return;
@@ -2060,12 +2122,12 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
DCHECK(target->IsExternalString());
if (heap_->InNewSpace(target)) {
- // String is still in new space. Update the table entry.
+ // String is still in new space. Update the table entry.
*last = target;
++last;
} else {
- // String got promoted. Move it to the old string list.
- AddOldString(target);
+ // String got promoted. Move it to the old string list.
+ old_space_strings_.push_back(target);
}
}
@@ -2078,6 +2140,29 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
#endif
}
+void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
+ old_space_strings_.reserve(old_space_strings_.size() +
+ new_space_strings_.size());
+ std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
+ std::back_inserter(old_space_strings_));
+ new_space_strings_.clear();
+}
+
+void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
+ if (!new_space_strings_.empty()) {
+ v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
+ new_space_strings_.data() + new_space_strings_.size());
+ }
+}
+
+void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
+ IterateNewSpaceStrings(v);
+ if (!old_space_strings_.empty()) {
+ v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
+ old_space_strings_.data() + old_space_strings_.size());
+ }
+}
+
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
external_string_table_.UpdateNewSpaceReferences(updater_func);
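
PromoteAllNewSpaceStrings() above (moved here from heap-inl.h) transfers every new-space entry to the old-space list with a single reserve, a std::move into a back_inserter, and a clear of the source. The same bulk-transfer idiom on plain std::vector, as a standalone illustration only:

#include <algorithm>
#include <iterator>
#include <string>
#include <vector>

// Sketch of the promote-all idiom on std::vector<std::string> instead of the
// String* entries used by the external string table.
void PromoteAll(std::vector<std::string>* new_space,
                std::vector<std::string>* old_space) {
  // Reserve once so the push_backs done through back_inserter cannot
  // reallocate repeatedly during the transfer.
  old_space->reserve(old_space->size() + new_space->size());
  std::move(new_space->begin(), new_space->end(),
            std::back_inserter(*old_space));
  new_space->clear();  // discard the moved-from elements
}

int main() {
  std::vector<std::string> new_space{"a", "b"};
  std::vector<std::string> old_space{"c"};
  PromoteAll(&new_space, &old_space);
  return (old_space.size() == 3 && new_space.empty()) ? 0 : 1;
}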
@@ -2238,7 +2323,7 @@ HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
- DCHECK(filler_size > 0);
+ DCHECK_LT(0, filler_size);
int pre_filler = GetFillToAlign(object->address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
@@ -2296,6 +2381,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_weak_cell_cache(Smi::kZero);
+ map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
return map;
}
@@ -2303,6 +2389,11 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
+ !Map::CanHaveFastTransitionableElementsKind(instance_type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
@@ -2316,7 +2407,6 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_instance_size(instance_size);
map->clear_unused();
map->set_inobject_properties_or_constructor_function_index(0);
- map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->set_weak_cell_cache(Smi::kZero);
@@ -2359,307 +2449,6 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
}
-const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
- { type, size, k##camel_name##MapRootIndex } \
- ,
- STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
-#undef STRING_TYPE_ELEMENT
-};
-
-
-const Heap::ConstantStringTable Heap::constant_string_table[] = {
- {"", kempty_stringRootIndex},
-#define CONSTANT_STRING_ELEMENT(name, contents) \
- { contents, k##name##RootIndex } \
- ,
- INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
-#undef CONSTANT_STRING_ELEMENT
-};
-
-
-const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
- { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
- ,
- STRUCT_LIST(STRUCT_TABLE_ELEMENT)
-#undef STRUCT_TABLE_ELEMENT
-};
-
-namespace {
-
-void FinalizePartialMap(Heap* heap, Map* map) {
- map->set_code_cache(heap->empty_fixed_array());
- map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
- map->set_raw_transitions(Smi::kZero);
- map->set_instance_descriptors(heap->empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- map->set_prototype(heap->null_value());
- map->set_constructor_or_backpointer(heap->null_value());
-}
-
-} // namespace
-
-bool Heap::CreateInitialMaps() {
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
- if (!allocation.To(&obj)) return false;
- }
- // Map::cast cannot be used due to uninitialized map field.
- Map* new_meta_map = reinterpret_cast<Map*>(obj);
- set_meta_map(new_meta_map);
- new_meta_map->set_map_after_allocation(new_meta_map);
-
- { // Partial map allocation
-#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
- { \
- Map* map; \
- if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
-
- ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
- fixed_array_map()->set_elements_kind(HOLEY_ELEMENTS);
- ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
- ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
- ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
-
-#undef ALLOCATE_PARTIAL_MAP
- }
-
- // Allocate the empty array.
- {
- AllocationResult allocation = AllocateEmptyFixedArray();
- if (!allocation.To(&obj)) return false;
- }
- set_empty_fixed_array(FixedArray::cast(obj));
-
- {
- AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_null_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kNull);
-
- {
- AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_undefined_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- DCHECK(!InNewSpace(undefined_value()));
- {
- AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_the_hole_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kTheHole);
-
- // Set preliminary exception sentinel value before actually initializing it.
- set_exception(null_value());
-
- // Allocate the empty descriptor array.
- {
- AllocationResult allocation = AllocateEmptyFixedArray();
- if (!allocation.To(&obj)) return false;
- }
- set_empty_descriptor_array(DescriptorArray::cast(obj));
-
- // Fix the instance_descriptors for the existing maps.
- FinalizePartialMap(this, meta_map());
- FinalizePartialMap(this, fixed_array_map());
- FinalizePartialMap(this, undefined_map());
- undefined_map()->set_is_undetectable();
- FinalizePartialMap(this, null_map());
- null_map()->set_is_undetectable();
- FinalizePartialMap(this, the_hole_map());
-
- { // Map allocation
-#define ALLOCATE_MAP(instance_type, size, field_name) \
- { \
- Map* map; \
- if (!AllocateMap((instance_type), size).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
-
-#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
- ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
-
-#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
- constructor_function_index) \
- { \
- ALLOCATE_MAP((instance_type), (size), field_name); \
- field_name##_map()->SetConstructorFunctionIndex( \
- (constructor_function_index)); \
- }
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
- fixed_cow_array_map()->set_elements_kind(HOLEY_ELEMENTS);
- DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
- ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
- ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
- Context::NUMBER_FUNCTION_INDEX)
- ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
- mutable_heap_number)
- ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
- Context::SYMBOL_FUNCTION_INDEX)
- ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
-
- ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
- Context::BOOLEAN_FUNCTION_INDEX);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
-
- ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
- js_promise_capability);
-
- for (unsigned i = 0; i < arraysize(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
- {
- AllocationResult allocation = AllocateMap(entry.type, entry.size);
- if (!allocation.To(&obj)) return false;
- }
- Map* map = Map::cast(obj);
- map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
- // Mark cons string maps as unstable, because their objects can change
- // maps during GC.
- if (StringShape(entry.type).IsCons()) map->mark_unstable();
- roots_[entry.index] = map;
- }
-
- { // Create a separate external one byte string map for native sources.
- AllocationResult allocation =
- AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
- ExternalOneByteString::kShortSize);
- if (!allocation.To(&obj)) return false;
- Map* map = Map::cast(obj);
- map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
- set_native_source_string_map(map);
- }
-
- ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
- fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
- ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
- ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
- ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
- ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
- ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
- ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
-
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
-
- TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
-#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
-
- ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
-
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
- ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
- ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
- ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
- ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
-
- ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
-
- for (unsigned i = 0; i < arraysize(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- Map* map;
- if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
- roots_[entry.index] = map;
- }
-
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, unseeded_number_dictionary)
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
- native_context_map()->set_visitor_id(kVisitNativeContext);
-
- ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
- shared_function_info)
-
- ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
- external_map()->set_is_extensible(false);
-#undef ALLOCATE_PRIMITIVE_MAP
-#undef ALLOCATE_VARSIZE_MAP
-#undef ALLOCATE_MAP
- }
-
- {
- AllocationResult allocation = AllocateEmptyScopeInfo();
- if (!allocation.To(&obj)) return false;
- }
-
- set_empty_scope_info(ScopeInfo::cast(obj));
- {
- AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_true_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kTrue);
-
- {
- AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_false_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kFalse);
-
- { // Empty arrays
- {
- ByteArray* byte_array;
- if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
- set_empty_byte_array(byte_array);
- }
-
- {
- PropertyArray* property_array;
- if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
- set_empty_property_array(property_array);
- }
-
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
- }
-
- TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
-#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
- }
- DCHECK(!InNewSpace(empty_fixed_array()));
- return true;
-}
-
AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
@@ -2680,6 +2469,23 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
+AllocationResult Heap::AllocateBigInt(int length, bool zero_initialize,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > BigInt::kMaxLength) {
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
+ }
+ int size = BigInt::SizeFor(length);
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ }
+ result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
+ BigInt::cast(result)->Initialize(length, zero_initialize);
+ return result;
+}
+
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
@@ -2730,7 +2536,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
AllocationResult Heap::AllocateTransitionArray(int capacity) {
- DCHECK(capacity > 0);
+ DCHECK_LT(0, capacity);
HeapObject* raw_array = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
@@ -2749,21 +2555,6 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
return array;
}
-bool Heap::CreateApiObjects() {
- HandleScope scope(isolate());
- set_message_listeners(*TemplateList::New(isolate(), 2));
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
- if (!allocation.To(&obj)) return false;
- }
- InterceptorInfo* info = InterceptorInfo::cast(obj);
- info->set_flags(0);
- set_noop_interceptor_info(info);
- return true;
-}
-
-
void Heap::CreateJSEntryStub() {
JSEntryStub stub(isolate(), StackFrame::ENTRY);
set_js_entry_code(*stub.GetCode());
@@ -2808,294 +2599,6 @@ void Heap::CreateFixedStubs() {
Heap::CreateJSConstructEntryStub();
}
-
-void Heap::CreateInitialObjects() {
- HandleScope scope(isolate());
- Factory* factory = isolate()->factory();
-
- // The -0 value must be set before NewNumber works.
- set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
- DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
-
- set_nan_value(*factory->NewHeapNumber(
- std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
- set_hole_nan_value(
- *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
- set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
- set_minus_infinity_value(
- *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
-
- // Allocate initial string table.
- set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
-
- // Allocate
-
- // Finish initializing oddballs after creating the string table.
- Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
- factory->nan_value(), "undefined", Oddball::kUndefined);
-
- // Initialize the null_value.
- Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::kZero, isolate()), "object", Oddball::kNull);
-
- // Initialize the_hole_value.
- Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
- factory->hole_nan_value(), "undefined",
- Oddball::kTheHole);
-
- // Initialize the true_value.
- Oddball::Initialize(isolate(), factory->true_value(), "true",
- handle(Smi::FromInt(1), isolate()), "boolean",
- Oddball::kTrue);
-
- // Initialize the false_value.
- Oddball::Initialize(isolate(), factory->false_value(), "false",
- handle(Smi::kZero, isolate()), "boolean",
- Oddball::kFalse);
-
- set_uninitialized_value(
- *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
- handle(Smi::FromInt(-1), isolate()), "undefined",
- Oddball::kUninitialized));
-
- set_arguments_marker(
- *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
- handle(Smi::FromInt(-4), isolate()), "undefined",
- Oddball::kArgumentsMarker));
-
- set_termination_exception(*factory->NewOddball(
- factory->termination_exception_map(), "termination_exception",
- handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
-
- set_exception(*factory->NewOddball(factory->exception_map(), "exception",
- handle(Smi::FromInt(-5), isolate()),
- "undefined", Oddball::kException));
-
- set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
- "optimized_out",
- handle(Smi::FromInt(-6), isolate()),
- "undefined", Oddball::kOptimizedOut));
-
- set_stale_register(
- *factory->NewOddball(factory->stale_register_map(), "stale_register",
- handle(Smi::FromInt(-7), isolate()), "undefined",
- Oddball::kStaleRegister));
-
- for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
- Handle<String> str =
- factory->InternalizeUtf8String(constant_string_table[i].contents);
- roots_[constant_string_table[i].index] = *str;
- }
-
- // Create the code_stubs dictionary. The initial size is set to avoid
- // expanding the dictionary during bootstrapping.
- set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
-
- {
- HandleScope scope(isolate());
-#define SYMBOL_INIT(name) \
- { \
- Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
- Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
- symbol->set_name(*name##d); \
- roots_[k##name##RootIndex] = *symbol; \
- }
- PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
-#undef SYMBOL_INIT
- }
-
- {
- HandleScope scope(isolate());
-#define SYMBOL_INIT(name, description) \
- Handle<Symbol> name = factory->NewSymbol(); \
- Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
- name->set_name(*name##d); \
- roots_[k##name##RootIndex] = *name;
- PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
-#undef SYMBOL_INIT
-
-#define SYMBOL_INIT(name, description) \
- Handle<Symbol> name = factory->NewSymbol(); \
- Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
- name->set_is_well_known_symbol(true); \
- name->set_name(*name##d); \
- roots_[k##name##RootIndex] = *name;
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
-#undef SYMBOL_INIT
-
- // Mark "Interesting Symbols" appropriately.
- to_string_tag_symbol->set_is_interesting_symbol(true);
- }
-
- Handle<NameDictionary> empty_property_dictionary =
- NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
- DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
- set_empty_property_dictionary(*empty_property_dictionary);
-
- set_public_symbol_table(*empty_property_dictionary);
- set_api_symbol_table(*empty_property_dictionary);
- set_api_private_symbol_table(*empty_property_dictionary);
-
- set_number_string_cache(
- *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
-
- // Allocate cache for single character one byte strings.
- set_single_character_string_cache(
- *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
-
- // Allocate cache for string split and regexp-multiple.
- set_string_split_cache(*factory->NewFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
- set_regexp_multiple_cache(*factory->NewFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
-
- set_undefined_cell(*factory->NewCell(factory->undefined_value()));
-
- // Microtask queue uses the empty fixed array as a sentinel for "empty".
- // Number of queued microtasks stored in Isolate::pending_microtask_count().
- set_microtask_queue(empty_fixed_array());
-
- {
- Handle<FixedArray> empty_sloppy_arguments_elements =
- factory->NewFixedArray(2, TENURED);
- empty_sloppy_arguments_elements->set_map_after_allocation(
- sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
- set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
- }
-
- {
- Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
- set_empty_weak_cell(*cell);
- cell->clear();
- }
-
- set_detached_contexts(empty_fixed_array());
- set_retained_maps(ArrayList::cast(empty_fixed_array()));
- set_retaining_path_targets(undefined_value());
-
- set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
-
- set_weak_new_space_object_to_code_list(
- ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
- weak_new_space_object_to_code_list()->SetLength(0);
-
- set_code_coverage_list(undefined_value());
-
- set_script_list(Smi::kZero);
-
- Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::New(isolate(), 1, TENURED,
- USE_CUSTOM_MINIMUM_CAPACITY);
- DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
- slow_element_dictionary->set_requires_slow_elements();
- set_empty_slow_element_dictionary(*slow_element_dictionary);
-
- set_materialized_objects(*factory->NewFixedArray(0, TENURED));
-
- // Handling of script id generation is in Heap::NextScriptId().
- set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
- set_next_template_serial_number(Smi::kZero);
-
- // Allocate the empty OrderedHashTable.
- Handle<FixedArray> empty_ordered_hash_table =
- factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
- empty_ordered_hash_table->set_map_no_write_barrier(
- *factory->ordered_hash_table_map());
- for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
- empty_ordered_hash_table->set(i, Smi::kZero);
- }
- set_empty_ordered_hash_table(*empty_ordered_hash_table);
-
- // Allocate the empty script.
- Handle<Script> script = factory->NewScript(factory->empty_string());
- script->set_type(Script::TYPE_NATIVE);
- set_empty_script(*script);
-
- Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_protector(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(the_hole_value());
- set_empty_property_cell(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_iterator_protector(*cell);
-
- Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_species_protector(*cell);
-
- Handle<Cell> string_length_overflow_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_string_length_protector(*string_length_overflow_cell);
-
- Handle<Cell> fast_array_iteration_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_fast_array_iteration_protector(*fast_array_iteration_cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_buffer_neutering_protector(*cell);
-
- set_serialized_templates(empty_fixed_array());
- set_serialized_global_proxy_sizes(empty_fixed_array());
-
- set_weak_stack_trace_list(Smi::kZero);
-
- set_noscript_shared_function_infos(Smi::kZero);
-
- // Initialize context slot cache.
- isolate_->context_slot_cache()->Clear();
-
- // Initialize descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
- // Initialize compilation cache.
- isolate_->compilation_cache()->Clear();
-
- // Finish creating JSPromiseCapabilityMap
- {
- // TODO(caitp): This initialization can be removed once PromiseCapability
- // object is no longer used by builtins implemented in javascript.
- Handle<Map> map = factory->js_promise_capability_map();
- map->set_inobject_properties_or_constructor_function_index(3);
-
- Map::EnsureDescriptorSlack(map, 3);
-
- PropertyAttributes attrs =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- { // promise
- Descriptor d = Descriptor::DataField(factory->promise_string(),
- JSPromiseCapability::kPromiseIndex,
- attrs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- { // resolve
- Descriptor d = Descriptor::DataField(factory->resolve_string(),
- JSPromiseCapability::kResolveIndex,
- attrs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- { // reject
- Descriptor d = Descriptor::DataField(factory->reject_string(),
- JSPromiseCapability::kRejectIndex,
- attrs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- map->set_is_extensible(false);
- set_js_promise_capability_map(*map);
- }
-}
-
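The removed initialization code above includes the creation of the various protector cells (array_protector, species_protector, and friends). A protector is just a cell holding Isolate::kProtectorValid that optimized fast paths consult; once the guarded invariant is broken the cell is overwritten and the fast path is skipped. A toy, standalone model of that pattern (the constants and the Sum example are purely illustrative, not V8 code):

    #include <iostream>

    // Toy model of a protector cell: a single word that starts out "valid"
    // and is flipped once the invariant it guards is broken. Fast paths
    // check the cell; generic paths never look at it.
    constexpr int kProtectorValid = 1;
    constexpr int kProtectorInvalid = 0;

    struct ProtectorCell {
      int value = kProtectorValid;
    };

    int SumFast(const int* elements, int length) {
      int sum = 0;
      for (int i = 0; i < length; i++) sum += elements[i];
      return sum;
    }

    int SumGeneric(const int* elements, int length) {
      // Stand-in for a slower path that re-validates everything per element.
      return SumFast(elements, length);
    }

    int Sum(const ProtectorCell& protector, const int* elements, int length) {
      return protector.value == kProtectorValid ? SumFast(elements, length)
                                                : SumGeneric(elements, length);
    }

    int main() {
      ProtectorCell array_protector;
      int data[] = {1, 2, 3};
      std::cout << Sum(array_protector, data, 3) << "\n";  // fast path
      array_protector.value = kProtectorInvalid;           // invariant broken
      std::cout << Sum(array_protector, data, 3) << "\n";  // generic path
    }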
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kNumberStringCacheRootIndex:
@@ -3297,7 +2800,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
+ instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget);
instance->set_osr_loop_nesting_level(0);
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
@@ -3617,6 +3120,12 @@ AllocationResult Heap::CopyCode(Code* code) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
obj_size <= code_space()->AreaSize());
+
+ // Clear the trap handler index since it can't be shared between code
+ // objects. We have to do this before calling Relocate because Relocate
+ // would adjust the base pointer for the old code.
+ new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
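The new comment documents an ordering constraint in CopyCode: the copy's trap handler index must be cleared before Relocate runs, so the clone never aliases the original's registration. A minimal standalone sketch of the same idea, with a hypothetical CodeLike struct standing in for V8's Code layout:

    #include <cstdint>

    // Hypothetical stand-ins for a code object and an external registry that
    // tracks it by index; not V8's real trap-handler API.
    constexpr int kInvalidIndex = -1;

    struct CodeLike {
      uintptr_t base_address;
      int registry_index;  // Index into an external table, or kInvalidIndex.
    };

    CodeLike Copy(const CodeLike& original, uintptr_t new_base) {
      CodeLike copy = original;             // Raw byte-wise copy of the object.
      copy.registry_index = kInvalidIndex;  // Clear before any fix-ups so the
                                            // copy cannot alias the original's
                                            // registry entry.
      copy.base_address = new_base;         // "Relocate" the copy afterwards.
      return copy;
    }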
@@ -3872,7 +3381,7 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
while (stream_length != 0) {
size_t consumed = 0;
uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
- DCHECK(c != unibrow::Utf8::kBadChar);
+ DCHECK_NE(unibrow::Utf8::kBadChar, c);
DCHECK(consumed <= stream_length);
stream_length -= consumed;
stream += consumed;
@@ -3887,8 +3396,8 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
*chars++ = c;
}
}
- DCHECK(stream_length == 0);
- DCHECK(len == 0);
+ DCHECK_EQ(0, stream_length);
+ DCHECK_EQ(0, len);
}
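Many hunks in this file mechanically convert DCHECK(a op b) into the comparison forms (DCHECK_EQ, DCHECK_LE, DCHECK_GE, ...), whose failure message includes both operand values rather than just the stringified condition. A simplified, standalone illustration of why the comparison form is more useful (toy macros, not V8's base/logging implementation):

    #include <cstdlib>
    #include <iostream>

    // Simplified stand-ins for plain and comparison-style assertion macros.
    #define MY_CHECK(cond)                                  \
      do {                                                  \
        if (!(cond)) {                                      \
          std::cerr << "Check failed: " #cond "\n";         \
          std::abort();                                     \
        }                                                   \
      } while (0)

    #define MY_CHECK_LE(lhs, rhs)                           \
      do {                                                  \
        auto l = (lhs);                                     \
        auto r = (rhs);                                     \
        if (!(l <= r)) {                                    \
          std::cerr << "Check failed: " #lhs " <= " #rhs    \
                    << " (" << l << " vs. " << r << ")\n";  \
          std::abort();                                     \
        }                                                   \
      } while (0)

    int main() {
      int chars = 12;
      MY_CHECK(chars >= 0);   // On failure: only the condition text.
      MY_CHECK_LE(0, chars);  // On failure: also prints both values.
    }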
@@ -3907,7 +3416,7 @@ static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
template <bool is_one_byte, typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
uint32_t hash_field) {
- DCHECK(chars >= 0);
+ DCHECK_LE(0, chars);
// Compute map and object size.
int size;
Map* map;
@@ -3962,7 +3471,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
DCHECK_LE(0, length);
DCHECK_GE(String::kMaxLength, length);
int size = SeqOneByteString::SizeFor(length);
- DCHECK(size <= SeqOneByteString::kMaxSize);
+ DCHECK_GE(SeqOneByteString::kMaxSize, size);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
@@ -3986,7 +3495,7 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
DCHECK_LE(0, length);
DCHECK_GE(String::kMaxLength, length);
int size = SeqTwoByteString::SizeFor(length);
- DCHECK(size <= SeqTwoByteString::kMaxSize);
+ DCHECK_GE(SeqTwoByteString::kMaxSize, size);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
@@ -4237,7 +3746,7 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
PretenureFlag pretenure,
Object* filler) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
DCHECK(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
@@ -4257,7 +3766,10 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
AllocationResult Heap::AllocatePropertyArray(int length,
PretenureFlag pretenure) {
- DCHECK(length >= 0);
+ // Allow length = 0 for the empty_property_array singleton.
+ DCHECK_LE(0, length);
+ DCHECK_IMPLIES(length == 0, pretenure == TENURED);
+
DCHECK(!InNewSpace(undefined_value()));
HeapObject* result = nullptr;
{
@@ -4272,12 +3784,13 @@ AllocationResult Heap::AllocatePropertyArray(int length,
return result;
}
-AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
+AllocationResult Heap::AllocateUninitializedFixedArray(
+ int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
HeapObject* obj = nullptr;
{
- AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+ AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
if (!allocation.To(&obj)) return allocation;
}
@@ -4321,7 +3834,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
AllocationResult Heap::AllocateRawFeedbackVector(int length,
PretenureFlag pretenure) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
@@ -4381,8 +3894,8 @@ AllocationResult Heap::AllocateSymbol() {
return result;
}
-
-AllocationResult Heap::AllocateStruct(InstanceType type) {
+AllocationResult Heap::AllocateStruct(InstanceType type,
+ PretenureFlag pretenure) {
Map* map;
switch (type) {
#define MAKE_CASE(NAME, Name, name) \
@@ -4397,7 +3910,8 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
int size = map->instance_size();
Struct* result = nullptr;
{
- AllocationResult allocation = Allocate(map, OLD_SPACE);
+ AllocationSpace space = SelectSpace(pretenure);
+ AllocationResult allocation = Allocate(map, space);
if (!allocation.To(&result)) return allocation;
}
result->InitializeBody(size);
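AllocateStruct now routes its PretenureFlag through SelectSpace instead of hard-coding OLD_SPACE, matching the other allocators touched above. A condensed model of that flag-to-space mapping, assuming the usual TENURED → old generation, NOT_TENURED → young generation behaviour (simplified stand-in enums, not V8's definitions):

    #include <cassert>

    enum PretenureFlag { NOT_TENURED, TENURED };
    enum AllocationSpace { NEW_SPACE, OLD_SPACE };

    // Model of the space selection performed by the allocator.
    AllocationSpace SelectSpace(PretenureFlag pretenure) {
      return pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
    }

    int main() {
      // Callers keeping the NOT_TENURED default still allocate in the young
      // generation; callers passing TENURED go straight to the old generation.
      assert(SelectSpace(NOT_TENURED) == NEW_SPACE);
      assert(SelectSpace(TENURED) == OLD_SPACE);
    }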
@@ -4555,7 +4069,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -4600,8 +4114,10 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
}
}
#ifdef VERIFY_HEAP
- DCHECK(pending_layout_change_object_ == nullptr);
- pending_layout_change_object_ = object;
+ if (FLAG_verify_heap) {
+ DCHECK_NULL(pending_layout_change_object_);
+ pending_layout_change_object_ = object;
+ }
#endif
}
@@ -4624,6 +4140,8 @@ class SlotCollectingVisitor final : public ObjectVisitor {
};
void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+ if (!FLAG_verify_heap) return;
+
// Check that Heap::NotifyObjectLayout was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
@@ -4686,7 +4204,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
break;
}
case DO_FULL_GC: {
- DCHECK(contexts_disposed_ > 0);
+ DCHECK_LT(0, contexts_disposed_);
HistogramTimerScope scope(isolate_->counters()->gc_context());
TRACE_EVENT0("v8", "V8.GCContext");
CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
@@ -5302,8 +4820,8 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
private:
inline void FixHandle(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
HeapObject* current = reinterpret_cast<HeapObject*>(*p);
- if (!current->IsHeapObject()) return;
const MapWord map_word = current->map_word();
if (!map_word.IsForwardingAddress() && current->IsFiller()) {
#ifdef DEBUG
@@ -5649,8 +5167,8 @@ const double Heap::kTargetMutatorUtilization = 0.97;
// F = R * (1 - MU) / (R * (1 - MU) - MU)
double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
double max_factor) {
- DCHECK(max_factor >= kMinHeapGrowingFactor);
- DCHECK(max_factor <= kMaxHeapGrowingFactor);
+ DCHECK_LE(kMinHeapGrowingFactor, max_factor);
+ DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
@@ -5695,8 +5213,8 @@ double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
size_t old_gen_size) {
- CHECK(factor > 1.0);
- CHECK(old_gen_size > 0);
+ CHECK_LT(1.0, factor);
+ CHECK_LT(0, old_gen_size);
uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
MinimumAllocationLimitGrowingStep());
@@ -5886,7 +5404,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
@@ -5905,10 +5423,10 @@ bool Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
- mark_compact_collector_->weak_objects());
+ marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
} else {
concurrent_marking_ =
- new ConcurrentMarking(this, nullptr, nullptr, nullptr);
+ new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
}
for (int i = 0; i <= LAST_SPACE; i++) {
@@ -5919,7 +5437,6 @@ bool Heap::SetUp() {
if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
return false;
}
- new_space_top_after_last_gc_ = new_space()->top();
space_[OLD_SPACE] = old_space_ =
new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
@@ -5938,7 +5455,7 @@ bool Heap::SetUp() {
if (!lo_space_->SetUp()) return false;
// Set up the seed that is used to randomize the string hash function.
- DCHECK(hash_seed() == 0);
+ DCHECK_EQ(Smi::kZero, hash_seed());
if (FLAG_randomize_hashes) InitializeHashSeed();
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5986,22 +5503,6 @@ void Heap::InitializeHashSeed() {
}
}
-bool Heap::CreateHeapObjects() {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
-
- // Create initial objects
- CreateInitialObjects();
- CHECK_EQ(0u, gc_count_);
-
- set_native_contexts_list(undefined_value());
- set_allocation_sites_list(undefined_value());
-
- return true;
-}
-
-
void Heap::SetStackLimits() {
DCHECK(isolate_ != NULL);
DCHECK(isolate_ == isolate());
@@ -6034,7 +5535,7 @@ void Heap::NotifyDeserializationComplete() {
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
for (Page* p : *s) {
- CHECK(p->NeverEvacuate());
+ DCHECK(p->NeverEvacuate());
}
#endif // DEBUG
}
@@ -6053,7 +5554,8 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
js_object->GetEmbedderField(0) &&
js_object->GetEmbedderField(0) != undefined_value() &&
js_object->GetEmbedderField(1) != undefined_value()) {
- DCHECK(reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2 == 0);
+ DCHECK_EQ(0,
+ reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
@@ -6359,7 +5861,7 @@ class CheckHandleCountVisitor : public RootVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
~CheckHandleCountVisitor() override {
- CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
+ CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
void VisitRootPointers(Root root, Object** start, Object** end) override {
handle_count_ += end - start;
@@ -6532,7 +6034,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
- : filter_(filter), marking_stack_(10) {}
+ : filter_(filter) {}
void VisitPointers(HeapObject* host, Object** start,
Object** end) override {
@@ -6544,8 +6046,9 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
void TransitiveClosure() {
- while (!marking_stack_.is_empty()) {
- HeapObject* obj = marking_stack_.RemoveLast();
+ while (!marking_stack_.empty()) {
+ HeapObject* obj = marking_stack_.back();
+ marking_stack_.pop_back();
obj->Iterate(this);
}
}
@@ -6556,12 +6059,12 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
if (filter_->MarkAsReachable(obj)) {
- marking_stack_.Add(obj);
+ marking_stack_.push_back(obj);
}
}
}
UnreachableObjectsFilter* filter_;
- List<HeapObject*> marking_stack_;
+ std::vector<HeapObject*> marking_stack_;
};
friend class MarkingVisitor;
@@ -6606,7 +6109,7 @@ HeapIterator::~HeapIterator() {
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
if (filtering_ != kNoFiltering) {
- DCHECK(object_iterator_ == nullptr);
+ DCHECK_NULL(object_iterator_);
}
#endif
delete space_iterator_;
@@ -6891,5 +6394,15 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
UNREACHABLE();
}
+void Heap::CreateObjectStats() {
+ if (V8_LIKELY(FLAG_gc_stats == 0)) return;
+ if (!live_object_stats_) {
+ live_object_stats_ = new ObjectStats(this);
+ }
+ if (!dead_object_stats_) {
+ dead_object_stats_ = new ObjectStats(this);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7b87770385..687be8a3db 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -111,6 +111,7 @@ using v8::MemoryPressureLevel;
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
V(Map, property_array_map, PropertyArrayMap) \
+ V(Map, bigint_map, BigIntMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -162,6 +163,7 @@ using v8::MemoryPressureLevel;
V(Map, optimized_out_map, OptimizedOutMap) \
V(Map, stale_register_map, StaleRegisterMap) \
/* Canonical empty values */ \
+ V(EnumCache, empty_enum_cache, EmptyEnumCache) \
V(PropertyArray, empty_property_array, EmptyPropertyArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
@@ -184,6 +186,7 @@ using v8::MemoryPressureLevel;
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
/* Protectors */ \
+ V(Cell, array_constructor_protector, ArrayConstructorProtector) \
V(PropertyCell, array_protector, ArrayProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, species_protector, SpeciesProtector) \
@@ -228,9 +231,6 @@ using v8::MemoryPressureLevel;
V(FixedArray, serialized_templates, SerializedTemplates) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
- /* per-Isolate map for JSPromiseCapability. */ \
- /* TODO(caitp): Make this a Struct */ \
- V(Map, js_promise_capability_map, JSPromiseCapabilityMap) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode)
@@ -267,6 +267,7 @@ using v8::MemoryPressureLevel;
V(ArrayBufferNeuteringProtector) \
V(ArrayIteratorProtector) \
V(ArrayProtector) \
+ V(BigIntMap) \
V(BlockContextMap) \
V(BooleanMap) \
V(ByteArrayMap) \
@@ -819,17 +820,17 @@ class Heap {
inline uint32_t HashSeed();
inline int NextScriptId();
-
- inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
- inline void SetConstructStubCreateDeoptPCOffset(int pc_offset);
- inline void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
- inline void SetGetterStubDeoptPCOffset(int pc_offset);
- inline void SetSetterStubDeoptPCOffset(int pc_offset);
- inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
inline int GetNextTemplateSerialNumber();
- inline void SetSerializedTemplates(FixedArray* templates);
- inline void SetSerializedGlobalProxySizes(FixedArray* sizes);
+ void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+ void SetConstructStubCreateDeoptPCOffset(int pc_offset);
+ void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
+ void SetGetterStubDeoptPCOffset(int pc_offset);
+ void SetSetterStubDeoptPCOffset(int pc_offset);
+ void SetInterpreterEntryReturnPCOffset(int pc_offset);
+
+ void SetSerializedTemplates(FixedArray* templates);
+ void SetSerializedGlobalProxySizes(FixedArray* sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -850,7 +851,7 @@ class Heap {
void DeoptMarkedAllocationSites();
- inline bool DeoptMaybeTenuredAllocationSites();
+ bool DeoptMaybeTenuredAllocationSites();
void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
Handle<WeakCell> code);
@@ -938,7 +939,7 @@ class Heap {
bool CreateHeapObjects();
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
- V8_INLINE void CreateObjectStats();
+ void CreateObjectStats();
// Destroys all memory allocated by the heap.
void TearDown();
@@ -1485,9 +1486,6 @@ class Heap {
Map* map, HeapObject* object,
PretenuringFeedbackMap* pretenuring_feedback);
- // Removes an entry from the global pretenuring storage.
- inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
-
// Merges local pretenuring feedback into the global one. Note that this
// method needs to be called after evacuation, as allocation sites may be
// evacuated and this method resolves forward pointers accordingly.
@@ -1524,7 +1522,7 @@ class Heap {
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
- void* result = base::OS::GetRandomMmapAddr();
+ void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
@@ -1535,7 +1533,7 @@ class Heap {
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
uintptr_t offset =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
@@ -1563,16 +1561,16 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
- inline void IterateAll(RootVisitor* v);
- inline void IterateNewSpaceStrings(RootVisitor* v);
- inline void PromoteAllNewSpaceStrings();
+ void IterateAll(RootVisitor* v);
+ void IterateNewSpaceStrings(RootVisitor* v);
+ void PromoteAllNewSpaceStrings();
// Restores internal invariant and gets rid of collected strings. Must be
// called after each Iterate*() that modified the strings.
void CleanUpAll();
void CleanUpNewSpaceStrings();
- // Destroys all allocated memory.
+ // Finalize all registered external strings and clear tables.
void TearDown();
void UpdateNewSpaceReferences(
@@ -1581,9 +1579,7 @@ class Heap {
Heap::ExternalStringTableUpdaterCallback updater_func);
private:
- inline void Verify();
-
- inline void AddOldString(String* string);
+ void Verify();
Heap* const heap_;
@@ -1820,6 +1816,9 @@ class Heap {
// object in old space must not move.
void ProcessPretenuringFeedback();
+ // Removes an entry from the global pretenuring storage.
+ void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
+
// ===========================================================================
// Actual GC. ================================================================
// ===========================================================================
@@ -1984,6 +1983,10 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult AllocateBigInt(int length,
+ bool zero_initialize,
+ PretenureFlag pretenure);
+
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
@@ -2084,7 +2087,8 @@ class Heap {
T t, int chars, uint32_t hash_field);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
+ MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Make a copy of src and return it.
MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
@@ -2171,7 +2175,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
// Allocates a new utility object in the old generation.
- MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
+ MUST_USE_RESULT AllocationResult
+ AllocateStruct(InstanceType type, PretenureFlag pretenure = NOT_TENURED);
// Allocates a new foreign object.
MUST_USE_RESULT AllocationResult
@@ -2253,7 +2258,6 @@ class Heap {
Space* space_[LAST_SPACE + 1];
HeapState gc_state_;
int gc_post_processing_depth_;
- Address new_space_top_after_last_gc_;
// Returns the amount of external memory registered since last global gc.
uint64_t PromotedExternalMemorySize();
@@ -2639,7 +2643,7 @@ class AllocationObserver {
public:
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK(step_size >= kPointerSize);
+ DCHECK_LE(kPointerSize, step_size);
}
virtual ~AllocationObserver() {}
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 92d257a669..b286289254 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -23,12 +23,26 @@
namespace v8 {
namespace internal {
-void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
- VMState<GC> state(incremental_marking_.heap()->isolate());
+void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
+ size_t size) {
+ Heap* heap = incremental_marking_.heap();
+ VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
- incremental_marking_.heap()->isolate(),
- &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
+ heap->isolate(), &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+ if (incremental_marking_.black_allocation() && addr != nullptr) {
+ // AdvanceIncrementalMarkingOnAllocation can start black allocation.
+ // Ensure that the new object is marked black.
+ HeapObject* object = HeapObject::FromAddress(addr);
+ if (incremental_marking_.marking_state()->IsWhite(object) &&
+ !heap->InNewSpace(object)) {
+ if (heap->lo_space()->Contains(object)) {
+ incremental_marking_.marking_state()->WhiteToBlack(object);
+ } else {
+ Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
+ }
+ }
+ }
}
IncrementalMarking::IncrementalMarking(Heap* heap)
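Observer::Step now receives the address and size of the triggering allocation so a just-allocated object can be blackened while black allocation is active. As a standalone sketch of the observer mechanism itself, fire once every step_size bytes and hand the last allocation to the subclass; the byte counting here is illustrative, not V8's exact accounting:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Minimal model of an allocation observer that is stepped every time a
    // given number of bytes has been allocated since the last notification.
    class AllocationObserver {
     public:
      explicit AllocationObserver(size_t step_size)
          : step_size_(step_size), bytes_since_step_(0) {}
      virtual ~AllocationObserver() = default;

      void AllocationHook(uintptr_t addr, size_t size) {
        bytes_since_step_ += size;
        if (bytes_since_step_ >= step_size_) {
          Step(bytes_since_step_, addr, size);
          bytes_since_step_ = 0;
        }
      }

     protected:
      virtual void Step(size_t bytes_allocated, uintptr_t addr, size_t size) = 0;

     private:
      size_t step_size_;
      size_t bytes_since_step_;
    };

    // An observer comparable in spirit to IncrementalMarking::Observer: it is
    // handed the triggering allocation so it can post-process the new object.
    class LoggingObserver final : public AllocationObserver {
     public:
      using AllocationObserver::AllocationObserver;

     private:
      void Step(size_t bytes_allocated, uintptr_t addr, size_t size) override {
        std::cout << "step after " << bytes_allocated << " bytes; last object at "
                  << std::hex << addr << std::dec << " (" << size << " bytes)\n";
      }
    };

    int main() {
      LoggingObserver observer(64 * 1024);  // e.g. the 64 KB young-gen threshold
      for (uintptr_t addr = 0x1000; addr < 0x1000 + 100 * 4096; addr += 4096) {
        observer.AllocationHook(addr, 4096);
      }
    }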
@@ -46,8 +60,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
finalize_marking_completed_(false),
trace_wrappers_toggle_(false),
request_type_(NONE),
- new_generation_observer_(*this, kAllocatedThreshold),
- old_generation_observer_(*this, kAllocatedThreshold) {
+ new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
+ old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
SetState(STOPPED);
}
@@ -108,8 +122,8 @@ void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
if (marking_state()->GreyToBlack(obj)) {
if (FLAG_concurrent_marking) {
marking_worklist()->PushBailout(obj);
- } else if (!marking_worklist()->Push(obj)) {
- non_atomic_marking_state()->BlackToGrey(obj);
+ } else {
+ marking_worklist()->Push(obj);
}
}
}
@@ -200,27 +214,24 @@ class IncrementalMarkingMarkingVisitor final
int start_offset =
Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
if (start_offset < object_size) {
+ // Ensure that the object is either grey or black before pushing it
+ // into the marking worklist.
+ incremental_marking_->marking_state()->WhiteToGrey(object);
if (FLAG_concurrent_marking) {
incremental_marking_->marking_worklist()->PushBailout(object);
} else {
- if (incremental_marking_->marking_state()->IsGrey(object)) {
- incremental_marking_->marking_worklist()->Push(object);
- } else {
- DCHECK(incremental_marking_->marking_state()->IsBlack(object));
- collector_->PushBlack(object);
- }
+ incremental_marking_->marking_worklist()->Push(object);
}
+ DCHECK(incremental_marking_->marking_state()->IsGrey(object) ||
+ incremental_marking_->marking_state()->IsBlack(object));
+
int end_offset =
Min(object_size, start_offset + kProgressBarScanningChunk);
int already_scanned_offset = start_offset;
- bool scan_until_end = false;
- do {
- VisitPointers(object, HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end = incremental_marking_->marking_worklist()->IsFull();
- } while (scan_until_end && start_offset < object_size);
+ VisitPointers(object, HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
incremental_marking_->NotifyIncompleteScanOfObject(
@@ -414,7 +425,6 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
}
}
-
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -436,6 +446,12 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
}
+void IncrementalMarking::Deactivate() {
+ DeactivateIncrementalWriteBarrier();
+ PatchIncrementalMarkingRecordWriteStubs(heap_,
+ RecordWriteStub::STORE_BUFFER_ONLY);
+}
+
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
int old_generation_size_mb =
@@ -529,8 +545,6 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- marking_worklist()->StartUsing();
-
ActivateIncrementalWriteBarrier();
// Marking bits are cleared by the sweeper.
@@ -924,11 +938,6 @@ void IncrementalMarking::Stop() {
}
IncrementalMarking::set_should_hurry(false);
- if (IsMarking()) {
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
- }
heap_->isolate()->stack_guard()->ClearGC();
SetState(STOPPED);
is_compacting_ = false;
@@ -1054,8 +1063,8 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
// leave marking work to standalone tasks. The ramp up duration and the
// target step count are chosen based on benchmarks.
const int kRampUpIntervalMs = 300;
- const size_t kTargetStepCount = 128;
- const size_t kTargetStepCountAtOOM = 16;
+ const size_t kTargetStepCount = 256;
+ const size_t kTargetStepCountAtOOM = 32;
size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
if (heap()->IsCloseToOutOfMemory(oom_slack)) {
@@ -1063,7 +1072,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
}
size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
- IncrementalMarking::kAllocatedThreshold);
+ IncrementalMarking::kMinStepSizeInBytes);
double time_passed_ms =
heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
@@ -1081,7 +1090,7 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
size_t bytes_to_process =
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
- if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+ if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
@@ -1089,6 +1098,13 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, max_step_size);
+ if (FLAG_concurrent_marking && marking_worklist()->IsBailoutEmpty()) {
+ // The number of background tasks + the main thread.
+ size_t tasks = heap()->concurrent_marking()->TaskCount() + 1;
+ bytes_to_process = Max(IncrementalMarking::kMinStepSizeInBytes,
+ bytes_to_process / tasks);
+ }
+
size_t bytes_processed = 0;
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
// Steps performed in tasks have put us ahead of schedule.
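Two pieces of arithmetic drive the per-allocation marking step: StepSizeToMakeProgress() ramps from a 64 KB floor towards initial_old_generation_size / 256 over a 300 ms window, and when the bailout worklist is empty the result is additionally split across the concurrent tasks plus the main thread. A hedged, standalone restatement of that math (the ramp factor is assumed to scale the step size linearly, as the surrounding code suggests):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    constexpr size_t KB = 1024;
    constexpr size_t kMinStepSizeInBytes = 64 * KB;
    constexpr size_t kTargetStepCount = 256;
    constexpr double kRampUpIntervalMs = 300;

    // Ramped step size: start small and grow towards old_gen_size / 256.
    size_t StepSizeToMakeProgress(size_t initial_old_generation_size,
                                  double time_since_start_ms) {
      size_t step_size = std::max(initial_old_generation_size / kTargetStepCount,
                                  kMinStepSizeInBytes);
      double factor = std::min(time_since_start_ms / kRampUpIntervalMs, 1.0);
      return static_cast<size_t>(factor * step_size);
    }

    // With concurrent marking helping, the main thread only takes its share.
    size_t ShareForMainThread(size_t bytes_to_process, size_t background_tasks) {
      size_t tasks = background_tasks + 1;  // background tasks + main thread
      return std::max(kMinStepSizeInBytes, bytes_to_process / tasks);
    }

    int main() {
      size_t old_gen = 128 * KB * KB;                       // 128 MB old gen
      size_t full = StepSizeToMakeProgress(old_gen, 400.0);  // past the ramp
      size_t early = StepSizeToMakeProgress(old_gen, 30.0);  // 10% into the ramp
      std::cout << full << " " << early << " "
                << ShareForMainThread(full, 3) << "\n";      // 4 tasks in total
    }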
@@ -1121,6 +1137,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
+ if (FLAG_concurrent_marking) {
+ heap_->new_space()->ResetOriginalTop();
+ // It is safe to merge back all objects that were on hold into the shared
+ // work list at Step because we are at a safepoint where all objects
+ // are properly initialized.
+ marking_worklist()->shared()->MergeGlobalPool(
+ marking_worklist()->on_hold());
+ }
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
@@ -1160,9 +1184,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
- step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
- bytes_to_process, duration);
+ "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
+ step_origin == StepOrigin::kV8 ? "in v8" : "in task",
+ bytes_processed / KB, bytes_to_process / KB, duration);
+ }
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Concurrently marked %" PRIuS "KB\n",
+ heap_->concurrent_marking()->TotalMarkedBytes() / KB);
}
return bytes_processed;
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 0395ab3a48..0579c9c676 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -82,6 +82,35 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool paused_;
};
+ // It's hard to know how much work the incremental marker should do to make
+ // progress in the face of the mutator creating new work for it. We start
+ // off at a moderate rate of work and gradually increase the speed of the
+ // incremental marker until it completes.
+ // Do some marking every time this much memory has been allocated or that many
+ // heavy (color-checking) write barriers have been invoked.
+ static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
+ static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
+ static const size_t kMinStepSizeInBytes = 64 * KB;
+
+ static const int kStepSizeInMs = 1;
+ static const int kMaxStepSizeInMs = 5;
+
+ // This is the upper bound for how many times we allow finalization of
+ // incremental marking to be postponed.
+ static const int kMaxIdleMarkingDelayCounter = 3;
+
+#ifndef DEBUG
+ static const intptr_t kActivationThreshold = 8 * MB;
+#else
+ static const intptr_t kActivationThreshold = 0;
+#endif
+
+#ifdef V8_CONCURRENT_MARKING
+ static const AccessMode kAtomicity = AccessMode::ATOMIC;
+#else
+ static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
+#endif
+
explicit IncrementalMarking(Heap* heap);
MarkingState* marking_state() { return &marking_state_; }
@@ -112,13 +141,12 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
}
}
-
- State state() {
+ State state() const {
DCHECK(state_ == STOPPED || FLAG_incremental_marking);
return state_;
}
- bool should_hurry() { return should_hurry_; }
+ bool should_hurry() const { return should_hurry_; }
void set_should_hurry(bool val) { should_hurry_ = val; }
bool finalize_marking_completed() const {
@@ -129,15 +157,15 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
finalize_marking_completed_ = val;
}
- inline bool IsStopped() { return state() == STOPPED; }
+ inline bool IsStopped() const { return state() == STOPPED; }
- inline bool IsSweeping() { return state() == SWEEPING; }
+ inline bool IsSweeping() const { return state() == SWEEPING; }
- INLINE(bool IsMarking()) { return state() >= MARKING; }
+ inline bool IsMarking() const { return state() >= MARKING; }
- inline bool IsMarkingIncomplete() { return state() == MARKING; }
+ inline bool IsMarkingIncomplete() const { return state() == MARKING; }
- inline bool IsComplete() { return state() == COMPLETE; }
+ inline bool IsComplete() const { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
return request_type_ == FINALIZATION && !finalize_marking_completed_;
@@ -182,33 +210,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
ForceCompletionAction force_completion,
StepOrigin step_origin);
- // It's hard to know how much work the incremental marker should do to make
- // progress in the face of the mutator creating new work for it. We start
- // of at a moderate rate of work and gradually increase the speed of the
- // incremental marker until it completes.
- // Do some marking every time this much memory has been allocated or that many
- // heavy (color-checking) write barriers have been invoked.
- static const size_t kAllocatedThreshold = 64 * KB;
-
- static const int kStepSizeInMs = 1;
- static const int kMaxStepSizeInMs = 5;
-
- // This is the upper bound for how many times we allow finalization of
- // incremental marking to be postponed.
- static const int kMaxIdleMarkingDelayCounter = 3;
-
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- static const intptr_t kActivationThreshold = 0;
-#endif
-
-#ifdef V8_CONCURRENT_MARKING
- static const AccessMode kAtomicity = AccessMode::ATOMIC;
-#else
- static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
-#endif
-
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
@@ -225,10 +226,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
- INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
- INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
- INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
- INLINE(void RecordWrites(HeapObject* obj));
+ V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
+ V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
+ V8_INLINE void RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
+ Object* value);
+ V8_INLINE void RecordWrites(HeapObject* obj);
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
@@ -290,6 +292,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
marking_worklist_ = marking_worklist;
}
+ void Deactivate();
+
private:
class Observer : public AllocationObserver {
public:
@@ -303,7 +307,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
IncrementalMarking& incremental_marking_;
};
- int64_t SpaceLeftInOldSpace();
+ static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
+ bool is_compacting);
+
+ static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
void StartMarking();
@@ -317,25 +324,21 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void RetainMaps();
void ActivateIncrementalWriteBarrier(PagedSpace* space);
- static void ActivateIncrementalWriteBarrier(NewSpace* space);
+ void ActivateIncrementalWriteBarrier(NewSpace* space);
void ActivateIncrementalWriteBarrier();
- static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
- static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+ void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+ void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
void DeactivateIncrementalWriteBarrier();
- static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
- bool is_compacting);
-
- static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
-
- INLINE(intptr_t ProcessMarkingWorklist(
+ V8_INLINE intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process,
- ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
+ ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
+
+ V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject* object);
- INLINE(bool IsFixedArrayWithProgressBar(HeapObject* object));
// Visits the object and returns its size.
- INLINE(int VisitObject(Map* map, HeapObject* obj));
+ V8_INLINE int VisitObject(Map* map, HeapObject* obj);
void RevisitObject(HeapObject* obj);
@@ -346,6 +349,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t StepSizeToKeepUpWithAllocations();
size_t StepSizeToMakeProgress();
+ void SetState(State s) {
+ state_ = s;
+ heap_->SetIsMarkingFlag(s >= MARKING);
+ }
+
Heap* heap_;
MarkCompactCollector::MarkingWorklist* marking_worklist_;
@@ -356,11 +364,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t bytes_marked_ahead_of_schedule_;
size_t unscanned_bytes_of_large_object_;
- void SetState(State s) {
- state_ = s;
- heap_->SetIsMarkingFlag(s >= MARKING);
- }
-
// Must use SetState() above to update state_
State state_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 2023f1debb..e914ec1f6c 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -12,16 +12,9 @@
namespace v8 {
namespace internal {
-void MarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK(non_atomic_marking_state()->IsBlack(obj));
- if (!marking_worklist()->Push(obj)) {
- non_atomic_marking_state()->BlackToGrey(obj);
- }
-}
-
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
- if (non_atomic_marking_state()->WhiteToBlack(obj)) {
- PushBlack(obj);
+ if (atomic_marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
}
@@ -29,8 +22,8 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
}
void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
- if (non_atomic_marking_state()->WhiteToBlack(obj)) {
- PushBlack(obj);
+ if (atomic_marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
}
@@ -38,8 +31,8 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
}
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
- if (non_atomic_marking_state()->WhiteToBlack(obj)) {
- PushBlack(obj);
+ if (atomic_marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
}
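The inline markers above now push objects grey (WhiteToGrey + Push) and leave the grey-to-black transition to whoever pops them, replacing the old PushBlack helper that had to flip objects back to grey when the bounded deque was full. A self-contained model of that tri-colour discipline, with a plain enum and std::vector standing in for V8's marking bitmaps and Worklist:

    #include <cassert>
    #include <vector>

    enum class Color { kWhite, kGrey, kBlack };

    struct Node {
      Color color = Color::kWhite;
      std::vector<Node*> children;
    };

    // Mark phase: roots are pushed grey, and an object only turns black once
    // it has been popped and its outgoing references have been pushed grey.
    void Mark(const std::vector<Node*>& roots) {
      std::vector<Node*> worklist;
      for (Node* root : roots) {
        if (root->color == Color::kWhite) {
          root->color = Color::kGrey;   // WhiteToGrey + Push
          worklist.push_back(root);
        }
      }
      while (!worklist.empty()) {
        Node* object = worklist.back();
        worklist.pop_back();
        object->color = Color::kBlack;  // GreyToBlack while visiting
        for (Node* child : object->children) {
          if (child->color == Color::kWhite) {
            child->color = Color::kGrey;
            worklist.push_back(child);
          }
        }
      }
    }

    int main() {
      Node a, b, c, unreachable;
      a.children = {&b};
      b.children = {&c, &a};  // cycles are fine: grey/black nodes are skipped
      Mark({&a});
      assert(a.color == Color::kBlack && c.color == Color::kBlack);
      assert(unreachable.color == Color::kWhite);  // stays white => garbage
    }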
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 194415e949..3d28a18c7a 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -459,11 +459,10 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
}
void MarkCompactCollector::SetUp() {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- marking_worklist()->SetUp();
+ DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
+ DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
+ DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
+ DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
void MinorMarkCompactCollector::SetUp() {}
@@ -471,7 +470,9 @@ void MinorMarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
- marking_worklist()->TearDown();
+ if (heap()->incremental_marking()->IsMarking()) {
+ marking_worklist()->Clear();
+ }
}
void MinorMarkCompactCollector::TearDown() {}
@@ -522,20 +523,11 @@ void MarkCompactCollector::CollectGarbage() {
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
MarkLiveObjects();
-
- DCHECK(heap_->incremental_marking()->IsStopped());
-
ClearNonLiveReferences();
+ VerifyMarking();
RecordObjectStats();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- FullMarkingVerifier verifier(heap());
- verifier.Run();
- }
-#endif
-
StartSweepSpaces();
Evacuate();
@@ -834,7 +826,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
? nullptr
: Page::FromAllocationAreaAddress(space->top());
for (Page* p : *space) {
- if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
+ if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
+ !p->CanAllocate())
+ continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
@@ -981,19 +975,12 @@ void MarkCompactCollector::Prepare() {
// them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
- heap()->concurrent_marking()->EnsureCompleted();
- heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
-
-#ifdef VERIFY_HEAP
- heap()->old_space()->VerifyLiveBytes();
- heap()->map_space()->VerifyLiveBytes();
- heap()->code_space()->VerifyLiveBytes();
-#endif
-
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
heap()->incremental_marking()->AbortBlackAllocation();
+ FinishConcurrentMarking();
+ heap()->incremental_marking()->Deactivate();
ClearMarkbits();
AbortWeakCollections();
AbortWeakObjects();
@@ -1028,6 +1015,28 @@ void MarkCompactCollector::Prepare() {
#endif
}
+void MarkCompactCollector::FinishConcurrentMarking() {
+ if (FLAG_concurrent_marking) {
+ heap()->concurrent_marking()->EnsureCompleted();
+ heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
+ }
+}
+
+void MarkCompactCollector::VerifyMarking() {
+ CHECK(marking_worklist()->IsEmpty());
+ DCHECK(heap_->incremental_marking()->IsStopped());
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ FullMarkingVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif
+#ifdef VERIFY_HEAP
+ heap()->old_space()->VerifyLiveBytes();
+ heap()->map_space()->VerifyLiveBytes();
+ heap()->code_space()->VerifyLiveBytes();
+#endif
+}
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
@@ -1123,7 +1132,7 @@ class MarkCompactMarkingVisitor final
// Marks the object black without pushing it on the marking stack. Returns
// true if object needed marking and false otherwise.
V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object) {
- if (collector_->non_atomic_marking_state()->WhiteToBlack(object)) {
+ if (collector_->atomic_marking_state()->WhiteToBlack(object)) {
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, object);
}
@@ -1168,7 +1177,6 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
- collector_->EmptyMarkingWorklist();
}
MarkCompactCollector* const collector_;
@@ -1370,39 +1378,6 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template <class T>
-void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- DCHECK(!marking_worklist()->IsFull());
-
- Map* filler_map = heap()->one_pointer_filler_map();
- for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
- if ((object->map() != filler_map) &&
- non_atomic_marking_state()->GreyToBlack(object)) {
- PushBlack(object);
- if (marking_worklist()->IsFull()) return;
- }
- }
-}
-
-void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
- DCHECK(!marking_worklist()->IsFull());
- for (auto object_and_size : LiveObjectRange<kGreyObjects>(
- p, non_atomic_marking_state()->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
- bool success = non_atomic_marking_state()->GreyToBlack(object);
- DCHECK(success);
- USE(success);
- PushBlack(object);
- if (marking_worklist()->IsFull()) return;
- }
-}
-
class RecordMigratedSlotVisitor : public ObjectVisitor {
public:
explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
@@ -1811,23 +1786,6 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
Heap* heap_;
};
-void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
- for (Page* p : *space) {
- DiscoverGreyObjectsOnPage(p);
- if (marking_worklist()->IsFull()) return;
- }
-}
-
-
-void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
- NewSpace* space = heap()->new_space();
- for (Page* page : PageRange(space->bottom(), space->top())) {
- DiscoverGreyObjectsOnPage(page);
- if (marking_worklist()->IsFull()) return;
- }
-}
-
-
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
@@ -1842,10 +1800,9 @@ void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
- if (non_atomic_marking_state()->WhiteToBlack(string_table)) {
+ if (atomic_marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
string_table->IteratePrefix(custom_root_body_visitor);
- ProcessMarkingWorklist();
}
}
@@ -1858,78 +1815,29 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
// Custom marking for string table and top optimized frame.
MarkStringTable(custom_root_body_visitor);
ProcessTopOptimizedFrame(custom_root_body_visitor);
-
- // There may be overflowed objects in the heap. Visit them now.
- while (marking_worklist()->overflowed()) {
- RefillMarkingWorklist();
- EmptyMarkingWorklist();
- }
}
-// Mark all objects reachable from the objects on the marking stack.
-// Before: the marking stack contains zero or more heap object pointers.
-// After: the marking stack is empty, and all objects reachable from the
-// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingWorklist() {
+void MarkCompactCollector::ProcessMarkingWorklist() {
HeapObject* object;
MarkCompactMarkingVisitor visitor(this);
while ((object = marking_worklist()->Pop()) != nullptr) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(non_atomic_marking_state()->IsWhite(object)));
-
+ DCHECK(!(atomic_marking_state()->IsWhite(object)));
+ atomic_marking_state()->GreyToBlack(object);
Map* map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
}
- DCHECK(marking_worklist()->IsEmpty());
-}
-
-
-// Sweep the heap for overflowed objects, clear their overflow bits, and
-// push them on the marking stack. Stop early if the marking stack fills
-// before sweeping completes. If sweeping completes, there are no remaining
-// overflowed objects in the heap so the overflow flag on the markings stack
-// is cleared.
-void MarkCompactCollector::RefillMarkingWorklist() {
- isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
- DCHECK(marking_worklist()->overflowed());
-
- DiscoverGreyObjectsInNewSpace();
- if (marking_worklist()->IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_worklist()->IsFull()) return;
- DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_worklist()->IsFull()) return;
- DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_worklist()->IsFull()) return;
- LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_worklist()->IsFull()) return;
-
- marking_worklist()->ClearOverflowed();
-}
-
-// Mark all objects reachable (transitively) from objects on the marking
-// stack. Before: the marking stack contains zero or more heap object
-// pointers. After: the marking stack is empty and there are no overflowed
-// objects in the heap.
-void MarkCompactCollector::ProcessMarkingWorklist() {
- EmptyMarkingWorklist();
- while (marking_worklist()->overflowed()) {
- RefillMarkingWorklist();
- EmptyMarkingWorklist();
- }
- DCHECK(marking_worklist()->IsEmpty());
+ DCHECK(marking_worklist()->IsBailoutEmpty());
}
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
bool only_process_harmony_weak_collections) {
- DCHECK(marking_worklist()->IsEmpty() && !marking_worklist()->overflowed());
+ DCHECK(marking_worklist()->IsEmpty());
bool work_to_do = true;
while (work_to_do) {
if (!only_process_harmony_weak_collections) {
@@ -1959,7 +1867,7 @@ void MarkCompactCollector::ProcessEphemeralMarking(
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
!it.done(); it.Advance()) {
- if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
+ if (it.frame()->type() == StackFrame::INTERPRETED) {
return;
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
@@ -1967,7 +1875,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code, visitor);
}
- ProcessMarkingWorklist();
return;
}
}
@@ -2102,7 +2009,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
if (marking_state_->WhiteToGrey(object)) {
collector_->main_marking_visitor()->Visit(object);
- collector_->EmptyMarkingWorklist();
+ collector_->ProcessMarkingWorklist();
}
}
@@ -2293,12 +2200,12 @@ class GlobalHandlesMarkingItem : public MarkingItem {
: task_(task) {}
void VisitRootPointer(Root root, Object** p) override {
- DCHECK(Root::kGlobalHandles == root);
+ DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
void VisitRootPointers(Root root, Object** start, Object** end) override {
- DCHECK(Root::kGlobalHandles == root);
+ DCHECK_EQ(Root::kGlobalHandles, root);
for (Object** p = start; p < end; p++) {
task_->MarkObject(*p);
}
@@ -2419,10 +2326,6 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
}
void MinorMarkCompactCollector::ProcessMarkingWorklist() {
- EmptyMarkingWorklist();
-}
-
-void MinorMarkCompactCollector::EmptyMarkingWorklist() {
MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
HeapObject* object = nullptr;
while (marking_worklist.Pop(&object)) {
@@ -2488,7 +2391,7 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
- DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
@@ -2563,6 +2466,8 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
void MinorMarkCompactCollector::Evacuate() {
@@ -2588,9 +2493,6 @@ void MinorMarkCompactCollector::Evacuate() {
}
}
- // Give pages that are queued to be freed back to the OS.
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
-
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
for (Page* p : new_space_evacuation_pages_) {
@@ -2633,8 +2535,6 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- marking_worklist()->StartUsing();
-
heap_->local_embedder_heap_tracer()->EnterFinalPause();
RootMarkingVisitor root_visitor(this);
@@ -2646,8 +2546,21 @@ void MarkCompactCollector::MarkLiveObjects() {
}
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
+ if (FLAG_concurrent_marking) {
+ heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ }
+ ProcessMarkingWorklist();
+
+ FinishConcurrentMarking();
+ ProcessMarkingWorklist();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
+ DCHECK(marking_worklist()->IsEmpty());
+
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic or through Harmony weak maps.
@@ -2655,6 +2568,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
ProcessEphemeralMarking(false);
+ DCHECK(marking_worklist()->IsEmpty());
}
// The objects reachable from the roots, weak maps or object groups
@@ -2671,12 +2585,12 @@ void MarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedHeapObject);
ProcessMarkingWorklist();
}
- // Then we mark the objects.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
- heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+ heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
+ &root_visitor);
ProcessMarkingWorklist();
}
@@ -2692,8 +2606,18 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
}
+ DCHECK(marking_worklist()->IsEmpty());
+ }
+
+ {
+ heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
+ &IsUnmarkedHeapObject);
}
}
+
+ if (was_marked_incrementally_) {
+ heap()->incremental_marking()->Deactivate();
+ }
}
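A minimal sketch (illustrative only, not part of the patch) of the ordering used in the MC_MARK_MAIN scope above: drain the main-thread worklist, wait for the concurrent markers to finish (they may publish more work), then drain again so the worklist is empty before the weak-closure phases. The callbacks stand in for ProcessMarkingWorklist() and FinishConcurrentMarking() and are hypothetical.

#include <functional>

// Hypothetical sketch of the finalize-marking sequence: drain, join the
// concurrent markers, then drain whatever they published.
void FinalizeMarkingSketch(const std::function<void()>& drain_main_worklist,
                           const std::function<void()>& join_concurrent_markers) {
  drain_main_worklist();
  join_concurrent_markers();
  drain_main_worklist();
}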
@@ -2876,12 +2800,8 @@ bool MarkCompactCollector::CompactTransitionArray(
RecordSlot(transitions, key_slot, key);
Object* raw_target = transitions->GetRawTarget(i);
transitions->SetTarget(transition_index, raw_target);
- // Maps are not compacted, but for cached handlers the target slot
- // must be recorded.
- if (!raw_target->IsMap()) {
- Object** target_slot = transitions->GetTargetSlot(transition_index);
- RecordSlot(transitions, target_slot, raw_target);
- }
+ Object** target_slot = transitions->GetTargetSlot(transition_index);
+ RecordSlot(transitions, target_slot, raw_target);
}
transition_index++;
}
@@ -2920,7 +2840,7 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
to_trim * DescriptorArray::kEntrySize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
- if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+ TrimEnumCache(map, descriptors);
descriptors->Sort();
if (FLAG_unbox_double_fields) {
@@ -2942,16 +2862,17 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
+ EnumCache* enum_cache = descriptors->GetEnumCache();
- FixedArray* enum_cache = descriptors->GetEnumCache();
-
- int to_trim = enum_cache->length() - live_enum;
+ FixedArray* keys = enum_cache->keys();
+ int to_trim = keys->length() - live_enum;
if (to_trim <= 0) return;
- heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
+ heap_->RightTrimFixedArray(keys, to_trim);
- if (!descriptors->HasEnumIndicesCache()) return;
- FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
+ FixedArray* indices = enum_cache->indices();
+ to_trim = indices->length() - live_enum;
+ if (to_trim <= 0) return;
+ heap_->RightTrimFixedArray(indices, to_trim);
}
@@ -3194,8 +3115,12 @@ void MarkCompactCollector::EvacuatePrologue() {
void MarkCompactCollector::EvacuateEpilogue() {
// New space.
heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Deallocate unmarked large objects.
+ heap()->lo_space()->FreeUnmarkedObjects();
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
@@ -3645,7 +3570,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
- DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
@@ -3933,9 +3858,9 @@ class UpdatingItem : public ItemParallelJob::Item {
virtual void Process() = 0;
};
-class PointersUpatingTask : public ItemParallelJob::Task {
+class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpatingTask(Isolate* isolate)
+ explicit PointersUpdatingTask(Isolate* isolate)
: ItemParallelJob::Task(isolate) {}
void RunInParallel() override {
@@ -4300,7 +4225,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
updating_job.Run();
}
@@ -4322,7 +4247,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages, old_to_new_slots_);
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
updating_job.Run();
}
@@ -4370,7 +4295,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages, old_to_new_slots_);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
{
@@ -4464,7 +4389,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}
old_space_evacuation_pages_.clear();
compacting_ = false;
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
@@ -4633,9 +4557,6 @@ void MarkCompactCollector::StartSweepSpaces() {
}
sweeper().StartSweeping();
}
-
- // Deallocate unmarked large objects.
- heap_->lo_space()->FreeUnmarkedObjects();
}
} // namespace internal
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index a6b6ead8de..1784a32e16 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -9,7 +9,6 @@
#include <vector>
#include "src/heap/marking.h"
-#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -62,14 +61,6 @@ class MarkingStateBase {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool BlackToGrey(HeapObject* obj) {
- MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
- MarkBit markbit = MarkBitFrom(p, obj->address());
- if (!Marking::BlackToGrey<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, -obj->Size());
- return true;
- }
-
V8_INLINE bool WhiteToGrey(HeapObject* obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
@@ -274,8 +265,7 @@ class MarkCompactCollectorBase {
// Marking operations for objects reachable from roots.
virtual void MarkLiveObjects() = 0;
// Mark objects reachable (transitively) from objects in the marking
- // stack.
- virtual void EmptyMarkingWorklist() = 0;
+ // work list.
virtual void ProcessMarkingWorklist() = 0;
// Clear non-live references held in side data structures.
virtual void ClearNonLiveReferences() = 0;
@@ -401,7 +391,6 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
void MarkRootSetInParallel();
void ProcessMarkingWorklist() override;
- void EmptyMarkingWorklist() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
@@ -487,6 +476,7 @@ struct WeakObjects {
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
+ using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
static const int kMainThread = 0;
@@ -498,10 +488,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// The heap parameter is not used but needed to match the sequential case.
explicit MarkingWorklist(Heap* heap) {}
- bool Push(HeapObject* object) { return shared_.Push(kMainThread, object); }
+ void Push(HeapObject* object) {
+ bool success = shared_.Push(kMainThread, object);
+ USE(success);
+ DCHECK(success);
+ }
- bool PushBailout(HeapObject* object) {
- return bailout_.Push(kMainThread, object);
+ void PushBailout(HeapObject* object) {
+ bool success = bailout_.Push(kMainThread, object);
+ USE(success);
+ DCHECK(success);
}
HeapObject* Pop() {
@@ -510,25 +506,34 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
if (bailout_.Pop(kMainThread, &result)) return result;
#endif
if (shared_.Pop(kMainThread, &result)) return result;
+#ifdef V8_CONCURRENT_MARKING
+ // The expectation is that this work list is empty almost all the time
+ // and we can thus avoid the emptiness checks by putting it last.
+ if (on_hold_.Pop(kMainThread, &result)) return result;
+#endif
return nullptr;
}
void Clear() {
bailout_.Clear();
shared_.Clear();
+ on_hold_.Clear();
}
- bool IsFull() { return false; }
+ bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
bool IsEmpty() {
return bailout_.IsLocalEmpty(kMainThread) &&
shared_.IsLocalEmpty(kMainThread) &&
- bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty();
+ on_hold_.IsLocalEmpty(kMainThread) &&
+ bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
+ on_hold_.IsGlobalPoolEmpty();
}
int Size() {
return static_cast<int>(bailout_.LocalSize(kMainThread) +
- shared_.LocalSize(kMainThread));
+ shared_.LocalSize(kMainThread) +
+ on_hold_.LocalSize(kMainThread));
}
// Calls the specified callback on each element of the deques and replaces
@@ -539,24 +544,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void Update(Callback callback) {
bailout_.Update(callback);
shared_.Update(callback);
+ on_hold_.Update(callback);
}
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* bailout() { return &bailout_; }
-
- // These empty functions are needed to match the interface
- // of the sequential marking deque.
- void SetUp() {}
- void TearDown() { Clear(); }
- void StartUsing() {}
- void StopUsing() {}
- void ClearOverflowed() {}
- void SetOverflowed() {}
- bool overflowed() const { return false; }
+ ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
void Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("bailout", &bailout_);
+ PrintWorklist("on_hold", &on_hold_);
}
private:
@@ -586,6 +584,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
ConcurrentMarkingWorklist shared_;
ConcurrentMarkingWorklist bailout_;
+ ConcurrentMarkingWorklist on_hold_;
};
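A standalone sketch of the pop priority implemented by MarkingWorklist::Pop() above: bailout objects first, then the shared list, and only then the on_hold list, which is expected to be empty almost all of the time. SimpleWorklist and PopInPriorityOrder are hypothetical names and do not model V8's segmented, per-task worklists.

#include <deque>

struct SimpleWorklist {
  std::deque<void*> items;
  bool Pop(void** out) {
    if (items.empty()) return false;
    *out = items.front();
    items.pop_front();
    return true;
  }
};

// Mirrors the pop order above: bailout, then shared, then the rarely
// non-empty on_hold list last to avoid needless emptiness checks.
void* PopInPriorityOrder(SimpleWorklist* bailout, SimpleWorklist* shared,
                         SimpleWorklist* on_hold) {
  void* result = nullptr;
  if (bailout->Pop(&result)) return result;
  if (shared->Pop(&result)) return result;
  if (on_hold->Pop(&result)) return result;
  return nullptr;
}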
class RootMarkingVisitor;
@@ -672,6 +671,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
kClearMarkbits,
};
+ AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
+
NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}
@@ -689,6 +690,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// choosing spaces to compact.
void Prepare();
+ void FinishConcurrentMarking();
+
bool StartCompaction();
void AbortCompaction();
@@ -748,6 +751,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
+ void VerifyMarking();
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
@@ -774,9 +778,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
- // Pushes a black object onto the marking work list.
- V8_INLINE void PushBlack(HeapObject* obj);
-
// Marks the object black and adds it to the marking work list.
// This is for non-incremental marking only.
V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
@@ -796,8 +797,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(ObjectVisitor* visitor);
- void ProcessMarkingWorklist() override;
-
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
// the final atomic marking pause including the following:
@@ -814,22 +813,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Collects a list of dependent code from maps embedded in optimize code.
DependentCode* DependentCodeListFromNonLiveMaps();
- // This function empties the marking stack, but may leave overflowed objects
- // in the heap, in which case the marking stack's overflow flag will be set.
- void EmptyMarkingWorklist() override;
-
- // Refill the marking stack with overflowed objects from the heap. This
- // function either leaves the marking stack full or clears the overflow
- // flag on the marking stack.
- void RefillMarkingWorklist();
-
- // Helper methods for refilling the marking stack by discovering grey objects
- // on various pages of the heap. Used by {RefillMarkingWorklist} only.
- template <class T>
- void DiscoverGreyObjectsWithIterator(T* it);
- void DiscoverGreyObjectsOnPage(MemoryChunk* p);
- void DiscoverGreyObjectsInSpace(PagedSpace* space);
- void DiscoverGreyObjectsInNewSpace();
+ // Drains the main thread marking work list. Will mark all pending objects
+ // if no concurrent threads are running.
+ void ProcessMarkingWorklist() override;
// Callback function for telling whether the object *p is an unmarked
// heap object.
@@ -943,6 +929,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Sweeper sweeper_;
+ AtomicMarkingState atomic_marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
friend class FullEvacuator;
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index c76302218f..9b1fe61236 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -267,13 +267,6 @@ class Marking : public AllStatic {
}
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool BlackToGrey(MarkBit markbit)) {
- STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
- DCHECK(IsBlack(markbit));
- return markbit.Next().Clear<mode>();
- }
-
- template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool WhiteToGrey(MarkBit markbit)) {
return markbit.Set<mode>();
}
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 0e1449bb92..a269873024 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,7 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
- DCHECK(delay_ms > 0);
+ DCHECK_LT(0, delay_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 84d1f61859..0ffe75c84a 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -9,7 +9,6 @@
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
-#include "src/objects/code-cache-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/utils.h"
@@ -432,24 +431,10 @@ void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
SameLiveness(map_obj, array)) {
RecordFixedArrayHelper(map_obj, array, DESCRIPTOR_ARRAY_SUB_TYPE, 0);
- if (array->HasEnumCache()) {
- RecordFixedArrayHelper(array, array->GetEnumCache(), ENUM_CACHE_SUB_TYPE,
- 0);
- }
- if (array->HasEnumIndicesCache()) {
- RecordFixedArrayHelper(array, array->GetEnumIndicesCache(),
- ENUM_INDICES_CACHE_SUB_TYPE, 0);
- }
- }
-
- FixedArray* code_cache = map_obj->code_cache();
- if (code_cache->length() > 0) {
- if (code_cache->IsCodeCacheHashTable()) {
- RecordHashTableHelper(map_obj, CodeCacheHashTable::cast(code_cache),
- MAP_CODE_CACHE_SUB_TYPE);
- } else {
- RecordFixedArrayHelper(map_obj, code_cache, MAP_CODE_CACHE_SUB_TYPE, 0);
- }
+ EnumCache* enum_cache = array->GetEnumCache();
+ RecordFixedArrayHelper(array, enum_cache->keys(), ENUM_CACHE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(array, enum_cache->indices(),
+ ENUM_INDICES_CACHE_SUB_TYPE, 0);
}
for (DependentCode* cur_dependent_code = map_obj->dependent_code();
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 64532e74bb..dbd1e3b370 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -194,23 +194,10 @@ template <typename ConcreteVisitor>
int MarkingVisitor<ConcreteVisitor>::VisitTransitionArray(
Map* map, TransitionArray* array) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- // Visit strong references.
- if (array->HasPrototypeTransitions()) {
- visitor->VisitPointer(array, array->GetPrototypeTransitionsSlot());
- }
- int num_transitions = array->number_of_entries();
- for (int i = 0; i < num_transitions; ++i) {
- visitor->VisitPointer(array, array->GetKeySlot(i));
- // A TransitionArray can hold maps or (transitioning StoreIC) handlers.
- // Maps have custom weak handling; handlers (which in turn weakly point
- // to maps) are marked strongly for now, and will be cleared during
- // compaction when the maps they refer to are dead.
- if (!array->GetRawTarget(i)->IsMap()) {
- visitor->VisitPointer(array, array->GetTargetSlot(i));
- }
- }
+ int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
+ TransitionArray::BodyDescriptor::IterateBody(array, size, visitor);
collector_->AddTransitionArray(array);
- return TransitionArray::BodyDescriptor::SizeOf(map, array);
+ return size;
}
template <typename ConcreteVisitor>
@@ -329,11 +316,6 @@ template <typename ConcreteVisitor>
int MarkingVisitor<ConcreteVisitor>::VisitMap(Map* map, Map* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- // Clears the cache of ICs related to this map.
- if (FLAG_cleanup_code_caches_at_gc) {
- object->ClearCodeCache(heap_);
- }
-
// When map collection is enabled we have to mark through map's transitions
// and back pointers in a special way to make these links weak.
if (object->CanTransition()) {
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index c7fb313ff6..93bbd0f524 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -83,25 +83,6 @@ static void ClearWeakList(Heap* heap, Object* list) {
}
}
-
-template <>
-struct WeakListVisitor<JSFunction> {
- static void SetWeakNext(JSFunction* function, Object* next) {
- function->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
- }
-
- static Object* WeakNext(JSFunction* function) {
- return function->next_function_link();
- }
-
- static int WeakNextOffset() { return JSFunction::kNextFunctionLinkOffset; }
-
- static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
-
- static void VisitPhantomObject(Heap*, JSFunction*) {}
-};
-
-
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code* code, Object* next) {
@@ -134,10 +115,6 @@ struct WeakListVisitor<Context> {
static void VisitLiveObject(Heap* heap, Context* context,
WeakObjectRetainer* retainer) {
- // Process the three weak lists linked off the context.
- DoWeakList<JSFunction>(heap, context, retainer,
- Context::OPTIMIZED_FUNCTIONS_LIST);
-
if (heap->gc_state() == Heap::MARK_COMPACT) {
// Record the slots of the weak entries in the native context.
for (int idx = Context::FIRST_WEAK_SLOT;
@@ -146,8 +123,7 @@ struct WeakListVisitor<Context> {
MarkCompactCollector::RecordSlot(context, slot, *slot);
}
// Code objects are always allocated in Code space, we do not have to
- // visit
- // them during scavenges.
+ // visit them during scavenges.
DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
}
@@ -171,8 +147,6 @@ struct WeakListVisitor<Context> {
}
static void VisitPhantomObject(Heap* heap, Context* context) {
- ClearWeakList<JSFunction>(heap,
- context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index b0befeb2f0..01708e7655 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -15,8 +15,11 @@
namespace v8 {
namespace internal {
+class BigInt;
+
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
+ V(BigInt) \
V(ByteArray) \
V(BytecodeArray) \
V(Cell) \
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index dd74b47945..1ea2f3493c 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -11,10 +11,8 @@
namespace v8 {
namespace internal {
-namespace {
-
// White list for objects that for sure only contain data.
-bool ContainsOnlyData(VisitorId visitor_id) {
+bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
switch (visitor_id) {
case kVisitSeqOneByteString:
return true;
@@ -32,8 +30,6 @@ bool ContainsOnlyData(VisitorId visitor_id) {
return false;
}
-} // namespace
-
void Scavenger::PageMemoryFence(Object* object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index ac55d011a0..fc70f60483 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,6 +4,7 @@
#include "src/heap/scavenger.h"
+#include "src/heap/barrier.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
@@ -72,26 +73,20 @@ Scavenger::Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
+ // We are not collecting slots on new space objects during mutation, so we
+ // have to scan for pointers to evacuation candidates when we promote
+ // objects. But we should not record any slots in non-black objects: a grey
+ // object's slots would be rescanned anyway, and a white object might not
+ // survive until the end of the collection, so recording its slots would
+ // violate the invariant.
const bool record_slots =
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
- if (target->IsJSFunction()) {
- // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
- // this links are recorded during processing of weak lists.
- JSFunction::BodyDescriptorWeak::IterateBody(target, size, &visitor);
- } else {
- target->IterateBody(target->map()->instance_type(), size, &visitor);
- }
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
}
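The record_slots condition above boils down to "compacting and the promoted object is already black"; a hypothetical one-line helper makes the predicate explicit.

// Hypothetical helper mirroring the condition computed above: slots are only
// recorded while compacting and only for black (fully marked) objects, since
// grey objects are rescanned and white objects may not survive the GC.
inline bool ShouldRecordSlots(bool is_compacting, bool target_is_black) {
  return is_compacting && target_is_black;
}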
-void Scavenger::Process(Barrier* barrier) {
+void Scavenger::Process(OneshotBarrier* barrier) {
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
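kProcessPromotionListThreshold bounds how much backing store the promotion list may accumulate before it is drained. One plausible shape of such a threshold-driven drain loop is sketched below with generic containers and callbacks; it illustrates the stated intent only and is not the body of Scavenger::Process.

#include <cstddef>

// Hypothetical sketch: interleave two work lists, preferring the copied list
// but switching to the promotion list whenever it exceeds the threshold (or
// the copied list is exhausted), so its backing store stays bounded.
template <typename List, typename ProcessCopied, typename ProcessPromoted>
void DrainWithThreshold(List* copied, List* promoted, std::size_t threshold,
                        ProcessCopied process_copied,
                        ProcessPromoted process_promoted) {
  while (!copied->empty() || !promoted->empty()) {
    while (!promoted->empty() &&
           (promoted->size() > threshold || copied->empty())) {
      process_promoted(promoted->back());
      promoted->pop_back();
    }
    if (!copied->empty()) {
      process_copied(copied->back());
      copied->pop_back();
    }
  }
}

For example, with std::vector backing both lists, DrainWithThreshold(&copied_vec, &promoted_vec, 128, handle_copied, handle_promoted) drains the copied list while keeping the promotion list at or near 128 entries.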
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 4f80a25357..1437092874 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -14,54 +14,16 @@
namespace v8 {
namespace internal {
-static const int kCopiedListSegmentSize = 256;
-static const int kPromotionListSegmentSize = 256;
-
-using AddressRange = std::pair<Address, Address>;
-using ObjectAndSize = std::pair<HeapObject*, int>;
-using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
-using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
+class OneshotBarrier;
class Scavenger {
public:
- class Barrier {
- public:
- Barrier() : tasks_(0), waiting_(0), done_(false) {}
-
- void Start() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- tasks_++;
- }
-
- void NotifyAll() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (waiting_ > 0) condition_.NotifyAll();
- }
-
- void Wait() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- waiting_++;
- if (waiting_ == tasks_) {
- done_ = true;
- condition_.NotifyAll();
- } else {
- // Spurious wakeup is ok here.
- condition_.Wait(&mutex_);
- }
- waiting_--;
- }
-
- void Reset() { done_ = false; }
-
- bool Done() { return done_; }
-
- private:
- base::ConditionVariable condition_;
- base::Mutex mutex_;
- int tasks_;
- int waiting_;
- bool done_;
- };
+ static const int kCopiedListSegmentSize = 256;
+ static const int kPromotionListSegmentSize = 256;
+
+ using ObjectAndSize = std::pair<HeapObject*, int>;
+ using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
+ using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
PromotionList* promotion_list, int task_id);
@@ -77,7 +39,7 @@ class Scavenger {
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
- void Process(Barrier* barrier = nullptr);
+ void Process(OneshotBarrier* barrier = nullptr);
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
@@ -130,6 +92,8 @@ class Scavenger {
void RecordCopiedObject(HeapObject* obj);
+ static inline bool ContainsOnlyData(VisitorId visitor_id);
+
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
diff --git a/deps/v8/src/heap/sequential-marking-deque.cc b/deps/v8/src/heap/sequential-marking-deque.cc
deleted file mode 100644
index 4f3edb0e69..0000000000
--- a/deps/v8/src/heap/sequential-marking-deque.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/sequential-marking-deque.h"
-
-#include "src/allocation.h"
-#include "src/base/bits.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-
-namespace v8 {
-namespace internal {
-
-void SequentialMarkingDeque::SetUp() {
- base::VirtualMemory reservation;
- if (!AllocVirtualMemory(kMaxSize, heap_->GetRandomMmapAddr(), &reservation)) {
- V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
- }
- backing_store_committed_size_ = 0;
- backing_store_.TakeControl(&reservation);
-}
-
-void SequentialMarkingDeque::TearDown() {
- if (backing_store_.IsReserved()) backing_store_.Release();
-}
-
-void SequentialMarkingDeque::StartUsing() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (in_use_) {
- // This can happen in mark-compact GC if the incremental marker already
- // started using the marking deque.
- return;
- }
- in_use_ = true;
- EnsureCommitted();
- array_ = reinterpret_cast<HeapObject**>(backing_store_.address());
- size_t size = FLAG_force_marking_deque_overflows
- ? 64 * kPointerSize
- : backing_store_committed_size_;
- DCHECK(base::bits::IsPowerOfTwo(static_cast<uint32_t>(size / kPointerSize)));
- mask_ = static_cast<int>((size / kPointerSize) - 1);
- top_ = bottom_ = 0;
- overflowed_ = false;
-}
-
-void SequentialMarkingDeque::StopUsing() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (!in_use_) return;
- DCHECK(IsEmpty());
- DCHECK(!overflowed_);
- top_ = bottom_ = mask_ = 0;
- in_use_ = false;
- if (FLAG_concurrent_sweeping) {
- StartUncommitTask();
- } else {
- Uncommit();
- }
-}
-
-void SequentialMarkingDeque::Clear() {
- DCHECK(in_use_);
- top_ = bottom_ = 0;
- overflowed_ = false;
-}
-
-void SequentialMarkingDeque::Uncommit() {
- DCHECK(!in_use_);
- bool success = backing_store_.Uncommit(backing_store_.address(),
- backing_store_committed_size_);
- backing_store_committed_size_ = 0;
- CHECK(success);
-}
-
-void SequentialMarkingDeque::EnsureCommitted() {
- DCHECK(in_use_);
- if (backing_store_committed_size_ > 0) return;
-
- for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
- if (backing_store_.Commit(backing_store_.address(), size, false)) {
- backing_store_committed_size_ = size;
- break;
- }
- }
- if (backing_store_committed_size_ == 0) {
- V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
- }
-}
-
-void SequentialMarkingDeque::StartUncommitTask() {
- if (!uncommit_task_pending_) {
- uncommit_task_pending_ = true;
- UncommitTask* task = new UncommitTask(heap_->isolate(), this);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/sequential-marking-deque.h b/deps/v8/src/heap/sequential-marking-deque.h
deleted file mode 100644
index 670a12ca0e..0000000000
--- a/deps/v8/src/heap/sequential-marking-deque.h
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
-#define V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
-
-#include <deque>
-
-#include "src/base/platform/mutex.h"
-#include "src/base/platform/platform.h"
-#include "src/cancelable-task.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class Isolate;
-class HeapObject;
-
-// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class SequentialMarkingDeque {
- public:
- explicit SequentialMarkingDeque(Heap* heap)
- : backing_store_committed_size_(0),
- array_(nullptr),
- top_(0),
- bottom_(0),
- mask_(0),
- overflowed_(false),
- in_use_(false),
- uncommit_task_pending_(false),
- heap_(heap) {}
-
- void SetUp();
- void TearDown();
-
- // Ensures that the marking deque is committed and will stay committed until
- // StopUsing() is called.
- void StartUsing();
- void StopUsing();
- void Clear();
-
- inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
- inline bool IsEmpty() { return top_ == bottom_; }
-
- int Size() {
- // Return (top - bottom + capacity) % capacity, where capacity = mask + 1.
- return (top_ - bottom_ + mask_ + 1) & mask_;
- }
-
- bool overflowed() const { return overflowed_; }
-
- void ClearOverflowed() { overflowed_ = false; }
-
- void SetOverflowed() { overflowed_ = true; }
-
- // Push the object on the marking stack if there is room, otherwise mark the
- // deque as overflowed and wait for a rescan of the heap.
- INLINE(bool Push(HeapObject* object)) {
- if (IsFull()) {
- SetOverflowed();
- return false;
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- return true;
- }
- }
-
- INLINE(HeapObject* Pop()) {
- if (IsEmpty()) return nullptr;
- top_ = ((top_ - 1) & mask_);
- HeapObject* object = array_[top_];
- return object;
- }
-
- // Calls the specified callback on each element of the deque and replaces
- // the element with the result of the callback. If the callback returns
- // nullptr then the element is removed from the deque.
- // The callback must accept HeapObject* and return HeapObject*.
- template <typename Callback>
- void Update(Callback callback) {
- int i = bottom_;
- int new_top = bottom_;
- while (i != top_) {
- if (callback(array_[i], &array_[new_top])) {
- new_top = (new_top + 1) & mask_;
- }
- i = (i + 1) & mask_;
- }
- top_ = new_top;
- }
-
- private:
- // This task uncommits the marking_deque backing store if
- // markin_deque->in_use_ is false.
- class UncommitTask : public CancelableTask {
- public:
- explicit UncommitTask(Isolate* isolate,
- SequentialMarkingDeque* marking_deque)
- : CancelableTask(isolate), marking_deque_(marking_deque) {}
-
- private:
- // CancelableTask override.
- void RunInternal() override {
- base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
- if (!marking_deque_->in_use_) {
- marking_deque_->Uncommit();
- }
- marking_deque_->uncommit_task_pending_ = false;
- }
-
- SequentialMarkingDeque* marking_deque_;
- DISALLOW_COPY_AND_ASSIGN(UncommitTask);
- };
-
- static const size_t kMaxSize = 4 * MB;
- static const size_t kMinSize = 256 * KB;
-
- // Must be called with mutex lock.
- void EnsureCommitted();
-
- // Must be called with mutex lock.
- void Uncommit();
-
- // Must be called with mutex lock.
- void StartUncommitTask();
-
- base::Mutex mutex_;
-
- base::VirtualMemory backing_store_;
- size_t backing_store_committed_size_;
- HeapObject** array_;
- // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
- // empty when top_ == bottom_. It is full when top_ + 1 == bottom
- // (mod mask + 1).
- int top_;
- int bottom_;
- int mask_;
- bool overflowed_;
- // in_use_ == true after taking mutex lock implies that the marking deque is
- // committed and will stay committed at least until in_use_ == false.
- bool in_use_;
- bool uncommit_task_pending_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(SequentialMarkingDeque);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SEQUENTIAL_MARKING_DEQUE_
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
new file mode 100644
index 0000000000..592fb53a7f
--- /dev/null
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -0,0 +1,629 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/setup-isolate.h"
+
+#include "src/ast/context-slot-cache.h"
+#include "src/compilation-cache.h"
+#include "src/contexts.h"
+#include "src/factory.h"
+#include "src/heap-symbols.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/layout-descriptor.h"
+#include "src/lookup-cache.h"
+#include "src/objects-inl.h"
+#include "src/objects/arguments.h"
+#include "src/objects/debug-objects.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/dictionary.h"
+#include "src/objects/map.h"
+#include "src/objects/module.h"
+#include "src/objects/script.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
+#include "src/regexp/jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
+ return heap->CreateHeapObjects();
+}
+
+bool Heap::CreateHeapObjects() {
+ // Create initial maps.
+ if (!CreateInitialMaps()) return false;
+ if (!CreateApiObjects()) return false;
+
+ // Create initial objects
+ CreateInitialObjects();
+ CHECK_EQ(0u, gc_count_);
+
+ set_native_contexts_list(undefined_value());
+ set_allocation_sites_list(undefined_value());
+
+ return true;
+}
+
+const Heap::StringTypeTable Heap::string_type_table[] = {
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
+ {type, size, k##camel_name##MapRootIndex},
+ STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#undef STRING_TYPE_ELEMENT
+};
+
+const Heap::ConstantStringTable Heap::constant_string_table[] = {
+ {"", kempty_stringRootIndex},
+#define CONSTANT_STRING_ELEMENT(name, contents) {contents, k##name##RootIndex},
+ INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#undef CONSTANT_STRING_ELEMENT
+};
+
+const Heap::StructTable Heap::struct_table[] = {
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
+ {NAME##_TYPE, Name::kSize, k##Name##MapRootIndex},
+ STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#undef STRUCT_TABLE_ELEMENT
+};
+
+namespace {
+
+void FinalizePartialMap(Heap* heap, Map* map) {
+ map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
+ map->set_raw_transitions(Smi::kZero);
+ map->set_instance_descriptors(heap->empty_descriptor_array());
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ map->set_prototype(heap->null_value());
+ map->set_constructor_or_backpointer(heap->null_value());
+}
+
+} // namespace
+
+bool Heap::CreateInitialMaps() {
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+ if (!allocation.To(&obj)) return false;
+ }
+ // Map::cast cannot be used due to uninitialized map field.
+ Map* new_meta_map = reinterpret_cast<Map*>(obj);
+ set_meta_map(new_meta_map);
+ new_meta_map->set_map_after_allocation(new_meta_map);
+
+ { // Partial map allocation
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
+ { \
+ Map* map; \
+ if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
+
+ ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+ ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel,
+ fixed_cow_array)
+ DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
+
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
+
+#undef ALLOCATE_PARTIAL_MAP
+ }
+
+ // Allocate the empty array.
+ {
+ AllocationResult allocation = AllocateEmptyFixedArray();
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_fixed_array(FixedArray::cast(obj));
+
+ {
+ AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_null_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kNull);
+
+ {
+ AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_undefined_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+ DCHECK(!InNewSpace(undefined_value()));
+ {
+ AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_the_hole_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kTheHole);
+
+ // Set preliminary exception sentinel value before actually initializing it.
+ set_exception(null_value());
+
+ // Setup the struct maps first (needed for the EnumCache).
+ for (unsigned i = 0; i < arraysize(struct_table); i++) {
+ const StructTable& entry = struct_table[i];
+ Map* map;
+ if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
+ roots_[entry.index] = map;
+ }
+
+ // Allocate the empty enum cache.
+ {
+ AllocationResult allocation = Allocate(tuple2_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_enum_cache(EnumCache::cast(obj));
+ EnumCache::cast(obj)->set_keys(empty_fixed_array());
+ EnumCache::cast(obj)->set_indices(empty_fixed_array());
+
+ // Allocate the empty descriptor array.
+ {
+ AllocationResult allocation =
+ AllocateUninitializedFixedArray(DescriptorArray::kFirstIndex, TENURED);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_descriptor_array(DescriptorArray::cast(obj));
+ DescriptorArray::cast(obj)->set(DescriptorArray::kDescriptorLengthIndex,
+ Smi::kZero);
+ DescriptorArray::cast(obj)->set(DescriptorArray::kEnumCacheIndex,
+ empty_enum_cache());
+
+ // Fix the instance_descriptors for the existing maps.
+ FinalizePartialMap(this, meta_map());
+ FinalizePartialMap(this, fixed_array_map());
+ FinalizePartialMap(this, fixed_cow_array_map());
+ FinalizePartialMap(this, undefined_map());
+ undefined_map()->set_is_undetectable();
+ FinalizePartialMap(this, null_map());
+ null_map()->set_is_undetectable();
+ FinalizePartialMap(this, the_hole_map());
+ for (unsigned i = 0; i < arraysize(struct_table); ++i) {
+ const StructTable& entry = struct_table[i];
+ FinalizePartialMap(this, Map::cast(roots_[entry.index]));
+ }
+
+ { // Map allocation
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { \
+ Map* map; \
+ if (!AllocateMap((instance_type), size).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
+
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+ ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+
+#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
+ constructor_function_index) \
+ { \
+ ALLOCATE_MAP((instance_type), (size), field_name); \
+ field_name##_map()->SetConstructorFunctionIndex( \
+ (constructor_function_index)); \
+ }
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
+ ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
+ ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
+ Context::NUMBER_FUNCTION_INDEX)
+ ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+ mutable_heap_number)
+ ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
+ Context::SYMBOL_FUNCTION_INDEX)
+ ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+
+ ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
+ Context::BOOLEAN_FUNCTION_INDEX);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
+ ALLOCATE_VARSIZE_MAP(BIGINT_TYPE, bigint);
+
+ for (unsigned i = 0; i < arraysize(string_type_table); i++) {
+ const StringTypeTable& entry = string_type_table[i];
+ {
+ AllocationResult allocation = AllocateMap(entry.type, entry.size);
+ if (!allocation.To(&obj)) return false;
+ }
+ Map* map = Map::cast(obj);
+ map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ // Mark cons string maps as unstable, because their objects can change
+ // maps during GC.
+ if (StringShape(entry.type).IsCons()) map->mark_unstable();
+ roots_[entry.index] = map;
+ }
+
+ { // Create a separate external one byte string map for native sources.
+ AllocationResult allocation =
+ AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
+ ExternalOneByteString::kShortSize);
+ if (!allocation.To(&obj)) return false;
+ Map* map = Map::cast(obj);
+ map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ set_native_source_string_map(map);
+ }
+
+ ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+ fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
+ ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+ ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
+ ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
+ ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
+
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
+
+ TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
+
+ ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
+
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+ ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+ ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
+ ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
+ ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+
+ ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
+
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_table)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, unseeded_number_dictionary)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+ native_context_map()->set_visitor_id(kVisitNativeContext);
+
+ ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
+ shared_function_info)
+
+ ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
+ external_map()->set_is_extensible(false);
+#undef ALLOCATE_PRIMITIVE_MAP
+#undef ALLOCATE_VARSIZE_MAP
+#undef ALLOCATE_MAP
+ }
+
+ {
+ AllocationResult allocation = AllocateEmptyScopeInfo();
+ if (!allocation.To(&obj)) return false;
+ }
+
+ set_empty_scope_info(ScopeInfo::cast(obj));
+ {
+ AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_true_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kTrue);
+
+ {
+ AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_false_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kFalse);
+
+ { // Empty arrays
+ {
+ ByteArray * byte_array;
+ if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
+ set_empty_byte_array(byte_array);
+ }
+
+ {
+ PropertyArray* property_array;
+ if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
+ set_empty_property_array(property_array);
+ }
+
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
+
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
+ }
+ DCHECK(!InNewSpace(empty_fixed_array()));
+ return true;
+}
+
+bool Heap::CreateApiObjects() {
+ HandleScope scope(isolate());
+ set_message_listeners(*TemplateList::New(isolate(), 2));
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
+ if (!allocation.To(&obj)) return false;
+ }
+ InterceptorInfo* info = InterceptorInfo::cast(obj);
+ info->set_flags(0);
+ set_noop_interceptor_info(info);
+ return true;
+}
+
+void Heap::CreateInitialObjects() {
+ HandleScope scope(isolate());
+ Factory* factory = isolate()->factory();
+
+ // The -0 value must be set before NewNumber works.
+ set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
+ DCHECK(std::signbit(minus_zero_value()->Number()));
+
+ set_nan_value(*factory->NewHeapNumber(
+ std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
+ set_hole_nan_value(
+ *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
+ set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+ set_minus_infinity_value(
+ *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
+
+ // Allocate initial string table.
+ set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
+
+ // Allocate
+
+ // Finish initializing oddballs after creating the string table.
+ Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
+ factory->nan_value(), "undefined", Oddball::kUndefined);
+
+ // Initialize the null_value.
+ Oddball::Initialize(isolate(), factory->null_value(), "null",
+ handle(Smi::kZero, isolate()), "object", Oddball::kNull);
+
+ // Initialize the_hole_value.
+ Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
+ factory->hole_nan_value(), "undefined",
+ Oddball::kTheHole);
+
+ // Initialize the true_value.
+ Oddball::Initialize(isolate(), factory->true_value(), "true",
+ handle(Smi::FromInt(1), isolate()), "boolean",
+ Oddball::kTrue);
+
+ // Initialize the false_value.
+ Oddball::Initialize(isolate(), factory->false_value(), "false",
+ handle(Smi::kZero, isolate()), "boolean",
+ Oddball::kFalse);
+
+ set_uninitialized_value(
+ *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
+ handle(Smi::FromInt(-1), isolate()), "undefined",
+ Oddball::kUninitialized));
+
+ set_arguments_marker(
+ *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
+ handle(Smi::FromInt(-4), isolate()), "undefined",
+ Oddball::kArgumentsMarker));
+
+ set_termination_exception(*factory->NewOddball(
+ factory->termination_exception_map(), "termination_exception",
+ handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
+
+ set_exception(*factory->NewOddball(factory->exception_map(), "exception",
+ handle(Smi::FromInt(-5), isolate()),
+ "undefined", Oddball::kException));
+
+ set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
+ "optimized_out",
+ handle(Smi::FromInt(-6), isolate()),
+ "undefined", Oddball::kOptimizedOut));
+
+ set_stale_register(
+ *factory->NewOddball(factory->stale_register_map(), "stale_register",
+ handle(Smi::FromInt(-7), isolate()), "undefined",
+ Oddball::kStaleRegister));
+
+ for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
+ Handle<String> str =
+ factory->InternalizeUtf8String(constant_string_table[i].contents);
+ roots_[constant_string_table[i].index] = *str;
+ }
+
+ // Create the code_stubs dictionary. The initial size is set to avoid
+ // expanding the dictionary during bootstrapping.
+ set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
+
+ {
+ HandleScope scope(isolate());
+#define SYMBOL_INIT(name) \
+ { \
+ Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
+ roots_[k##name##RootIndex] = *symbol; \
+ }
+ PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+ }
+
+ {
+ HandleScope scope(isolate());
+#define SYMBOL_INIT(name, description) \
+ Handle<Symbol> name = factory->NewSymbol(); \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+ name->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *name;
+ PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+
+#define SYMBOL_INIT(name, description) \
+ Handle<Symbol> name = factory->NewSymbol(); \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+ name->set_is_well_known_symbol(true); \
+ name->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *name;
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+
+ // Mark "Interesting Symbols" appropriately.
+ to_string_tag_symbol->set_is_interesting_symbol(true);
+ }
+
+ Handle<NameDictionary> empty_property_dictionary =
+ NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+ set_empty_property_dictionary(*empty_property_dictionary);
+
+ set_public_symbol_table(*empty_property_dictionary);
+ set_api_symbol_table(*empty_property_dictionary);
+ set_api_private_symbol_table(*empty_property_dictionary);
+
+ set_number_string_cache(
+ *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
+
+ // Allocate cache for single character one byte strings.
+ set_single_character_string_cache(
+ *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
+
+ // Allocate cache for string split and regexp-multiple.
+ set_string_split_cache(*factory->NewFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+ set_regexp_multiple_cache(*factory->NewFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+
+ set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+
+ // Microtask queue uses the empty fixed array as a sentinel for "empty".
+ // Number of queued microtasks stored in Isolate::pending_microtask_count().
+ set_microtask_queue(empty_fixed_array());
+
+ {
+ Handle<FixedArray> empty_sloppy_arguments_elements =
+ factory->NewFixedArray(2, TENURED);
+ empty_sloppy_arguments_elements->set_map_after_allocation(
+ sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
+ set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
+ }
+
+ {
+ Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
+ set_empty_weak_cell(*cell);
+ cell->clear();
+ }
+
+ set_detached_contexts(empty_fixed_array());
+ set_retained_maps(ArrayList::cast(empty_fixed_array()));
+ set_retaining_path_targets(undefined_value());
+
+ set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
+
+ set_weak_new_space_object_to_code_list(
+ ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
+ weak_new_space_object_to_code_list()->SetLength(0);
+
+ set_code_coverage_list(undefined_value());
+
+ set_script_list(Smi::kZero);
+
+ Handle<SeededNumberDictionary> slow_element_dictionary =
+ SeededNumberDictionary::New(isolate(), 1, TENURED,
+ USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
+ slow_element_dictionary->set_requires_slow_elements();
+ set_empty_slow_element_dictionary(*slow_element_dictionary);
+
+ set_materialized_objects(*factory->NewFixedArray(0, TENURED));
+
+ // Handling of script id generation is in Heap::NextScriptId().
+ set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+ set_next_template_serial_number(Smi::kZero);
+
+ // Allocate the empty OrderedHashTable.
+ Handle<FixedArray> empty_ordered_hash_table =
+ factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
+ empty_ordered_hash_table->set_map_no_write_barrier(
+ *factory->ordered_hash_table_map());
+ for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
+ empty_ordered_hash_table->set(i, Smi::kZero);
+ }
+ set_empty_ordered_hash_table(*empty_ordered_hash_table);
+
+ // Allocate the empty script.
+ Handle<Script> script = factory->NewScript(factory->empty_string());
+ script->set_type(Script::TYPE_NATIVE);
+ set_empty_script(*script);
+
+ Handle<Cell> array_constructor_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_array_constructor_protector(*array_constructor_cell);
+
+ Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(the_hole_value());
+ set_empty_property_cell(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_iterator_protector(*cell);
+
+ Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_species_protector(*cell);
+
+ Handle<Cell> string_length_overflow_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_string_length_protector(*string_length_overflow_cell);
+
+ Handle<Cell> fast_array_iteration_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_fast_array_iteration_protector(*fast_array_iteration_cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_buffer_neutering_protector(*cell);
+
+ set_serialized_templates(empty_fixed_array());
+ set_serialized_global_proxy_sizes(empty_fixed_array());
+
+ set_weak_stack_trace_list(Smi::kZero);
+
+ set_noscript_shared_function_infos(Smi::kZero);
+
+ // Initialize context slot cache.
+ isolate_->context_slot_cache()->Clear();
+
+ // Initialize descriptor cache.
+ isolate_->descriptor_lookup_cache()->Clear();
+
+ // Initialize compilation cache.
+ isolate_->compilation_cache()->Clear();
+}
+
+} // namespace internal
+} // namespace v8
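The protector cells initialized above all start out holding Isolate::kProtectorValid; fast paths consult the cell and give up once it has been flipped to the invalid state. A minimal standalone sketch of that protocol, using a hypothetical Protector type rather than V8's actual cell machinery:

#include <cassert>

// Hypothetical stand-in for a protector cell: it starts out valid and can only
// be invalidated, at which point guarded fast paths must stop being used.
class Protector {
 public:
  static const int kValid = 1;    // corresponds to Isolate::kProtectorValid
  static const int kInvalid = 0;  // the invalidated state

  bool IsValid() const { return value_ == kValid; }

  // Called when the guarded invariant is broken, e.g. when user code patches
  // a built-in that the protector watches.
  void Invalidate() { value_ = kInvalid; }

 private:
  int value_ = kValid;
};

int main() {
  Protector array_species;
  assert(array_species.IsValid());   // fast path may be used
  array_species.Invalidate();
  assert(!array_species.IsValid());  // fast path must be abandoned
  return 0;
}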
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index be5bdb37e2..a33d22f80c 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -201,6 +201,7 @@ void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
+ SetFlag(NEVER_EVACUATE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}
@@ -279,20 +280,6 @@ bool FreeListCategory::is_linked() {
return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}
-// Try linear allocation in the page of alloc_info's allocation top. Does
-// not contain slow case logic (e.g. move to the next page or try free list
-// allocation) so it can be used by all the allocation functions and for all
-// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit()) return NULL;
-
- allocation_info_.set_top(new_top);
- return HeapObject::FromAddress(current_top);
-}
-
-
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -310,14 +297,28 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
+ if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
+ return true;
+ if (free_list_.Allocate(size_in_bytes)) return true;
+ return SlowAllocateRaw(size_in_bytes);
+}
-HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment) {
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+ Address current_top = allocation_info_.top();
+ Address new_top = current_top + size_in_bytes;
+ DCHECK_LE(new_top, allocation_info_.limit());
+ allocation_info_.set_top(new_top);
+ return HeapObject::FromAddress(current_top);
+}
+
+HeapObject* PagedSpace::TryAllocateLinearlyAligned(
+ int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
- if (new_top > allocation_info_.limit()) return NULL;
+ if (new_top > allocation_info_.limit()) return nullptr;
allocation_info_.set_top(new_top);
if (filler_size > 0) {
@@ -329,79 +330,55 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
return HeapObject::FromAddress(current_top);
}
-
-// Raw allocation.
AllocationResult PagedSpace::AllocateRawUnaligned(
int size_in_bytes, UpdateSkipList update_skip_list) {
- HeapObject* object = AllocateLinearly(size_in_bytes);
-
- if (object == NULL) {
- object = free_list_.Allocate(size_in_bytes);
- if (object == NULL) {
- object = SlowAllocateRaw(size_in_bytes);
- }
- if (object != NULL && heap()->incremental_marking()->black_allocation()) {
- Address start = object->address();
- Address end = object->address() + size_in_bytes;
- Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
- }
+ if (!EnsureLinearAllocationArea(size_in_bytes)) {
+ return AllocationResult::Retry(identity());
}
-
- if (object != NULL) {
- if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
- return object;
+ HeapObject* object = AllocateLinearly(size_in_bytes);
+ DCHECK_NOT_NULL(object);
+ if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
+ SkipList::Update(object->address(), size_in_bytes);
}
-
- return AllocationResult::Retry(identity());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ return object;
}
-// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
int allocation_size = size_in_bytes;
- HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
-
- if (object == NULL) {
+ HeapObject* object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ if (object == nullptr) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
- object = free_list_.Allocate(allocation_size);
- if (object == NULL) {
- object = SlowAllocateRaw(allocation_size);
- }
- if (object != NULL) {
- if (heap()->incremental_marking()->black_allocation()) {
- Address start = object->address();
- Address end = object->address() + allocation_size;
- Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
- }
- if (filler_size != 0) {
- object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
- alignment);
- // Filler objects are initialized, so mark only the aligned object
- // memory as uninitialized.
- allocation_size = size_in_bytes;
- }
+ if (!EnsureLinearAllocationArea(allocation_size)) {
+ return AllocationResult::Retry(identity());
}
+ allocation_size = size_in_bytes;
+ object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ DCHECK_NOT_NULL(object);
}
-
- if (object != NULL) {
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
- return object;
- }
-
- return AllocationResult::Retry(identity());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
+ if (top() < top_on_previous_step_) {
+ // Generated code decreased the top() pointer to do folded allocations
+ DCHECK_EQ(Page::FromAddress(top()),
+ Page::FromAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
+ size_t bytes_since_last =
+ top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+
+ DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment == kDoubleAligned
@@ -411,8 +388,13 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
HeapObject* heap_obj = nullptr;
- if (!result.IsRetry() && result.To(&heap_obj)) {
- AllocationStep(heap_obj->address(), size_in_bytes);
+ if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
+ AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+ heap_obj->address(), size_in_bytes);
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ StartNextInlineAllocationStep();
}
return result;
}
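The refactored fast path above splits allocation into two steps: EnsureLinearAllocationArea() guarantees that [top, limit) can hold the request (falling back to the free list and then the slow path), and AllocateLinearly() simply bumps the top pointer. A simplified standalone sketch of that control flow, with placeholder types instead of V8's PagedSpace and FreeList:

#include <cstddef>
#include <cstdint>

// Simplified stand-in for the refactored PagedSpace fast path. The real code
// uses Address, FreeList and SlowAllocateRaw(); here a single static buffer
// plays the role of the refilled linear area.
struct LinearArea {
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

// Placeholder for FreeList::Allocate() / PagedSpace::SlowAllocateRaw(): hands
// out one fixed backing region and reports whether it is big enough.
bool RefillLinearArea(LinearArea* area, size_t size_in_bytes) {
  static unsigned char backing[1 << 16];
  area->top = reinterpret_cast<uintptr_t>(backing);
  area->limit = area->top + sizeof(backing);
  return area->limit - area->top >= size_in_bytes;
}

bool EnsureLinearAllocationArea(LinearArea* area, size_t size_in_bytes) {
  if (area->top + size_in_bytes <= area->limit) return true;  // fast case
  return RefillLinearArea(area, size_in_bytes);  // may fail: caller retries after GC
}

void* AllocateLinearly(LinearArea* area, size_t size_in_bytes) {
  // The caller has already guaranteed that the area is large enough.
  uintptr_t result = area->top;
  area->top += size_in_bytes;
  return reinterpret_cast<void*>(result);
}

void* AllocateRawUnaligned(LinearArea* area, size_t size_in_bytes) {
  if (!EnsureLinearAllocationArea(area, size_in_bytes)) return nullptr;
  return AllocateLinearly(area, size_in_bytes);
}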
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 74fee75673..f654c6689e 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -8,7 +8,6 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
@@ -118,12 +117,12 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
requested,
Max(kCodeRangeAreaAlignment,
static_cast<size_t>(base::OS::AllocateAlignment())),
- base::OS::GetRandomMmapAddr(), &reservation)) {
+ v8::internal::GetRandomMmapAddr(), &reservation)) {
return false;
}
@@ -140,7 +139,7 @@ bool CodeRange::SetUp(size_t requested) {
}
Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
size_t size = reservation.size() - (aligned_base - base) - reserved_area;
- allocation_list_.Add(FreeBlock(aligned_base, size));
+ allocation_list_.emplace_back(aligned_base, size);
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", reservation.address(), requested));
@@ -148,19 +147,15 @@ bool CodeRange::SetUp(size_t requested) {
return true;
}
-
-int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right) {
- // The entire point of CodeRange is that the difference between two
- // addresses in the range can be represented as a signed 32-bit int,
- // so the cast is semantically correct.
- return static_cast<int>(left->start - right->start);
+bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
+ const FreeBlock& right) {
+ return left.start < right.start;
}
bool CodeRange::GetNextAllocationBlock(size_t requested) {
for (current_allocation_block_index_++;
- current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_ < allocation_list_.size();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
return true; // Found a large enough allocation block.
@@ -168,26 +163,27 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
}
// Sort and merge the free blocks on the free list and the allocation list.
- free_list_.AddAll(allocation_list_);
- allocation_list_.Clear();
- free_list_.Sort(&CompareFreeBlockAddress);
- for (int i = 0; i < free_list_.length();) {
+ free_list_.insert(free_list_.end(), allocation_list_.begin(),
+ allocation_list_.end());
+ allocation_list_.clear();
+ std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
+ for (size_t i = 0; i < free_list_.size();) {
FreeBlock merged = free_list_[i];
i++;
// Add adjacent free blocks to the current merged block.
- while (i < free_list_.length() &&
+ while (i < free_list_.size() &&
free_list_[i].start == merged.start + merged.size) {
merged.size += free_list_[i].size;
i++;
}
if (merged.size > 0) {
- allocation_list_.Add(merged);
+ allocation_list_.push_back(merged);
}
}
- free_list_.Clear();
+ free_list_.clear();
for (current_allocation_block_index_ = 0;
- current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_ < allocation_list_.size();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
return true; // Found a large enough allocation block.
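With List<FreeBlock> replaced by std::vector, GetNextAllocationBlock() appends the allocation list to the free list, sorts by start address, and coalesces blocks that touch. A standalone sketch of the same sort-and-merge idea, using a hypothetical FreeBlock struct rather than the CodeRange-internal one:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical free-block descriptor; CodeRange's FreeBlock carries an Address
// start and a size_t size.
struct FreeBlock {
  uintptr_t start;
  size_t size;
};

// Sort blocks by start address and coalesce blocks that are adjacent, the same
// idea as the merge loop in CodeRange::GetNextAllocationBlock().
std::vector<FreeBlock> SortAndMergeFreeBlocks(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const FreeBlock& left, const FreeBlock& right) {
              return left.start < right.start;
            });
  std::vector<FreeBlock> merged;
  for (const FreeBlock& block : blocks) {
    if (!merged.empty() &&
        merged.back().start + merged.back().size == block.start) {
      merged.back().size += block.size;  // adjacent: extend the previous block
    } else {
      merged.push_back(block);
    }
  }
  return merged;
}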
@@ -238,24 +234,15 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.Add(FreeBlock(address, length));
+ free_list_.emplace_back(address, length);
virtual_memory_.Uncommit(address, length);
}
-
-void CodeRange::TearDown() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Release();
- base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.Free();
- allocation_list_.Free();
-}
-
-
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- DCHECK(allocation_list_.length() == 0 ||
- current_allocation_block_index_ < allocation_list_.length());
- if (allocation_list_.length() == 0 ||
+ DCHECK(allocation_list_.empty() ||
+ current_allocation_block_index_ < allocation_list_.size());
+ if (allocation_list_.empty() ||
requested_size > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough.
if (!GetNextAllocationBlock(requested_size)) return false;
@@ -276,7 +263,7 @@ bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
void CodeRange::ReleaseBlock(const FreeBlock* block) {
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.Add(*block);
+ free_list_.push_back(*block);
}
@@ -313,7 +300,7 @@ void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
DCHECK_EQ(size_.Value(), 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
- // DCHECK(size_executable_ == 0);
+ // DCHECK_EQ(0, size_executable_);
capacity_ = 0;
if (last_chunk_.IsReserved()) {
@@ -429,16 +416,14 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
- if (!base::VirtualMemory::CommitRegion(base, size,
- executable == EXECUTABLE)) {
+ if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
return true;
}
-
-void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
@@ -460,7 +445,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
- bool result = base::VirtualMemory::ReleaseRegion(base, size);
+ bool result = base::OS::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
}
@@ -468,8 +453,8 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
void* hint,
- base::VirtualMemory* controller) {
- base::VirtualMemory reservation;
+ VirtualMemory* controller) {
+ VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
return nullptr;
@@ -486,9 +471,9 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, void* hint, base::VirtualMemory* controller) {
+ Executability executable, void* hint, VirtualMemory* controller) {
DCHECK(commit_size <= reserve_size);
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
Address base =
ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
if (base == NULL) return NULL;
@@ -546,7 +531,7 @@ void MemoryChunk::InitializationMemoryFence() {
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
- base::VirtualMemory* reservation) {
+ VirtualMemory* reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
@@ -579,7 +564,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
- DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+ DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
@@ -593,7 +578,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
- DCHECK(page->area_size() <= Page::kAllocatableMemory);
+ DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
page->ResetAllocatedBytes();
@@ -689,7 +674,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
heap_->memory_allocator()->ZapBlock(start, length);
}
} else if (commit_size < committed_size) {
- DCHECK(commit_size > 0);
+ DCHECK_LT(0, commit_size);
// Shrink the committed area.
size_t length = committed_size - commit_size;
Address start = address() + committed_size + guard_size - length;
@@ -707,7 +692,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+ if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
return high_water_mark_.Value();
}
@@ -740,7 +725,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = nullptr;
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
void* address_hint = heap->GetRandomMmapAddr();
@@ -881,7 +866,7 @@ size_t Page::AvailableInFreeList() {
size_t Page::ShrinkToHighWaterMark() {
// Shrinking only makes sense outside of the CodeRange, where we don't care
// about address space fragmentation.
- base::VirtualMemory* reservation = reserved_memory();
+ VirtualMemory* reservation = reserved_memory();
if (!reservation->IsReserved()) return 0;
// Shrink pages to high water mark. The water mark points either to a filler
@@ -959,7 +944,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free,
Address new_area_end) {
- base::VirtualMemory* reservation = chunk->reserved_memory();
+ VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
chunk->size_ -= bytes_to_free;
chunk->area_end_ = new_area_end;
@@ -987,7 +972,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
- base::VirtualMemory* reservation = chunk->reserved_memory();
+ VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_.Value(), static_cast<size_t>(size));
@@ -1006,7 +991,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
- base::VirtualMemory* reservation = chunk->reserved_memory();
+ VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
} else {
@@ -1099,7 +1084,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
return nullptr;
}
- base::VirtualMemory reservation(start, size);
+ VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, &reservation);
size_.Increment(size);
@@ -1120,7 +1105,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
+ if (!base::OS::UncommitRegion(start, size)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -1172,9 +1157,8 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
}
}
-
-bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
- Address start, size_t commit_size,
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
+ size_t commit_size,
size_t reserved_size) {
// Commit page header (not executable).
Address header = start;
@@ -1229,7 +1213,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
- DCHECK(pages > 0);
+ DCHECK_LT(0, pages);
SlotSet* slot_set = new SlotSet[pages];
for (size_t i = 0; i < pages; i++) {
slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
@@ -1353,6 +1337,7 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
+ StartNextInlineAllocationStep();
}
void Space::RemoveAllocationObserver(AllocationObserver* observer) {
@@ -1360,6 +1345,7 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_observers_.end(), observer);
DCHECK(allocation_observers_.end() != it);
allocation_observers_.erase(it);
+ StartNextInlineAllocationStep();
}
void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
@@ -1368,11 +1354,12 @@ void Space::ResumeAllocationObservers() {
allocation_observers_paused_ = false;
}
-void Space::AllocationStep(Address soon_object, int size) {
+void Space::AllocationStep(int bytes_since_last, Address soon_object,
+ int size) {
if (!allocation_observers_paused_) {
heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(size, soon_object, size);
+ observer->AllocationStep(bytes_since_last, soon_object, size);
}
}
}
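AllocationStep() now forwards bytes_since_last, so observers are told how much memory was allocated from the linear area since their previous notification rather than just the size of the current object. A hedged sketch of an observer on the receiving end of this contract (a hypothetical SampleAllocationObserver, not V8's AllocationObserver base class):

#include <cstddef>
#include <cstdio>

// Hypothetical observer showing the reporting contract: it receives the number
// of bytes allocated since its previous notification plus the object that is
// about to be allocated, and fires roughly every step_size bytes.
class SampleAllocationObserver {
 public:
  explicit SampleAllocationObserver(size_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}

  void AllocationStep(size_t bytes_since_last, void* soon_object, size_t size) {
    if (bytes_since_last >= bytes_to_next_step_) {
      std::printf("step after ~%zu bytes; next object %p (%zu bytes)\n",
                  step_size_, soon_object, size);
      bytes_to_next_step_ = step_size_;
    } else {
      bytes_to_next_step_ -= bytes_since_last;
    }
  }

 private:
  size_t step_size_;
  size_t bytes_to_next_step_;
};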
@@ -1392,7 +1379,8 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
: Space(heap, space, executable),
anchor_(this),
free_list_(this),
- locked_page_(nullptr) {
+ locked_page_(nullptr),
+ top_on_previous_step_(0) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -1462,8 +1450,8 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
other->EmptyAllocationInfo();
// The linear allocation area of {other} should be destroyed now.
- DCHECK(other->top() == nullptr);
- DCHECK(other->limit() == nullptr);
+ DCHECK_NULL(other->top());
+ DCHECK_NULL(other->limit());
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
@@ -1480,7 +1468,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
for (Page* page : *this) {
@@ -1621,6 +1609,48 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
}
}
+void PagedSpace::DecreaseLimit(Address new_limit) {
+ Address old_limit = limit();
+ DCHECK_LE(top(), new_limit);
+ DCHECK_GE(old_limit, new_limit);
+ if (new_limit != old_limit) {
+ SetTopAndLimit(top(), new_limit);
+ Free(new_limit, old_limit - new_limit);
+ if (heap()->incremental_marking()->black_allocation()) {
+ Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
+ old_limit);
+ }
+ }
+}
+
+Address PagedSpace::ComputeLimit(Address start, Address end,
+ size_t size_in_bytes) {
+ DCHECK_GE(end - start, size_in_bytes);
+
+ if (heap()->inline_allocation_disabled()) {
+ // Keep the linear allocation area to fit exactly the requested size.
+ return start + size_in_bytes;
+ } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
+ identity() == OLD_SPACE && !is_local()) {
+ // Generated code may allocate inline from the linear allocation area for
+ // Old Space. To make sure we can observe these allocations, we use a lower
+ // limit.
+ size_t step = RoundSizeDownToObjectAlignment(
+ static_cast<int>(GetNextInlineAllocationStepSize()));
+ return Max(start + size_in_bytes, Min(start + step, end));
+ } else {
+ // The entire node can be used as the linear allocation area.
+ return end;
+ }
+}
+
+void PagedSpace::StartNextInlineAllocationStep() {
+ if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
+ top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
+ DecreaseLimit(ComputeLimit(top(), limit(), 0));
+ }
+}
+
void PagedSpace::MarkAllocationInfoBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
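ComputeLimit() above encodes the policy for how much of a node becomes the linear allocation area: an exact fit when inline allocation is disabled, a lowered limit when allocation observers are active on OLD_SPACE, and the whole node otherwise. A standalone sketch of that policy, assuming the caller supplies the observer state and step size explicitly:

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Standalone sketch of the limit policy: the returned limit always covers the
// requested object, but may stop short of `end` so allocation observers get a
// chance to run before the rest of the node is handed out.
uintptr_t ComputeLimitSketch(uintptr_t start, uintptr_t end, size_t size_in_bytes,
                             bool inline_allocation_disabled,
                             bool observers_active, size_t next_step_size) {
  if (inline_allocation_disabled) {
    // Keep the linear allocation area exactly as large as the request.
    return start + size_in_bytes;
  }
  if (observers_active) {
    // Lower the limit so the next observer step is not skipped.
    return std::max(start + size_in_bytes,
                    std::min(start + next_step_size, end));
  }
  // No constraints: the whole node becomes the linear allocation area.
  return end;
}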
@@ -1647,7 +1677,7 @@ void PagedSpace::EmptyAllocationInfo() {
Address current_top = top();
Address current_limit = limit();
if (current_top == nullptr) {
- DCHECK(current_limit == nullptr);
+ DCHECK_NULL(current_limit);
return;
}
@@ -1666,6 +1696,12 @@ void PagedSpace::EmptyAllocationInfo() {
}
}
+ if (top_on_previous_step_) {
+ DCHECK(current_top >= top_on_previous_step_);
+ AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
+ nullptr, 0);
+ top_on_previous_step_ = 0;
+ }
SetTopAndLimit(NULL, NULL);
DCHECK_GE(current_limit, current_top);
Free(current_top, current_limit - current_top);
@@ -1681,6 +1717,7 @@ void PagedSpace::ReleasePage(Page* page) {
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+ DCHECK(!top_on_previous_step_);
allocation_info_.Reset(nullptr, nullptr);
}
@@ -1770,7 +1807,7 @@ void PagedSpace::VerifyCountersAfterSweeping() {
size_t total_capacity = 0;
size_t total_allocated = 0;
for (Page* page : *this) {
- CHECK(page->SweepingDone());
+ DCHECK(page->SweepingDone());
total_capacity += page->area_size();
HeapObjectIterator it(page);
size_t real_allocated = 0;
@@ -1790,6 +1827,11 @@ void PagedSpace::VerifyCountersAfterSweeping() {
}
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
+ // We need to refine the counters on pages that are already swept and have
+ // not been moved over to the actual space. Otherwise, the AccountingStats
+  // are just an over-approximation.
+ RefillFreeList();
+
size_t total_capacity = 0;
size_t total_allocated = 0;
auto marking_state =
@@ -2108,16 +2150,6 @@ void NewSpace::StartNextInlineAllocationStep() {
}
}
-void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
- Space::AddAllocationObserver(observer);
- StartNextInlineAllocationStep();
-}
-
-void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
- Space::RemoveAllocationObserver(observer);
- StartNextInlineAllocationStep();
-}
-
void NewSpace::PauseAllocationObservers() {
// Do a step to account for memory allocated so far.
InlineAllocationStep(top(), top(), nullptr, 0);
@@ -2126,12 +2158,28 @@ void NewSpace::PauseAllocationObservers() {
UpdateInlineAllocationLimit(0);
}
+void PagedSpace::PauseAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ if (top_on_previous_step_) {
+ int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
+ AllocationStep(bytes_allocated, nullptr, 0);
+ }
+ Space::PauseAllocationObservers();
+ top_on_previous_step_ = 0;
+}
+
void NewSpace::ResumeAllocationObservers() {
- DCHECK(top_on_previous_step_ == 0);
+ DCHECK_NULL(top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
+// TODO(ofrobots): refactor into SpaceWithLinearArea
+void PagedSpace::ResumeAllocationObservers() {
+ DCHECK(top_on_previous_step_ == 0);
+ Space::ResumeAllocationObservers();
+ StartNextInlineAllocationStep();
+}
void NewSpace::InlineAllocationStep(Address top, Address new_top,
Address soon_object, size_t size) {
@@ -2467,16 +2515,16 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
Page* page = Page::FromAllocationAreaAddress(start);
Page* end_page = Page::FromAllocationAreaAddress(end);
SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
- CHECK_EQ(space, end_page->owner());
+ DCHECK_EQ(space, end_page->owner());
// Start address is before end address, either on same page,
// or end address is on a later page in the linked list of
// semi-space pages.
if (page == end_page) {
- CHECK_LE(start, end);
+ DCHECK_LE(start, end);
} else {
while (page != end_page) {
page = page->next_page();
- CHECK_NE(page, space->anchor());
+ DCHECK_NE(page, space->anchor());
}
}
}
@@ -2651,7 +2699,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
size_t NewSpace::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
@@ -2877,12 +2925,8 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
return node;
}
-// Allocation on the old space free list. If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space. If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(size_t size_in_bytes) {
- DCHECK(size_in_bytes <= kMaxBlockSize);
+bool FreeList::Allocate(size_t size_in_bytes) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
DCHECK(IsAligned(size_in_bytes, kPointerSize));
DCHECK_LE(owner_->top(), owner_->limit());
#ifdef DEBUG
@@ -2907,10 +2951,9 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == nullptr) return nullptr;
+ if (new_node == nullptr) return false;
DCHECK_GE(new_node_size, size_in_bytes);
- size_t bytes_left = new_node_size - size_in_bytes;
#ifdef DEBUG
for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2924,41 +2967,22 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
-
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
owner_->IncreaseAllocatedBytes(new_node_size,
Page::FromAddress(new_node->address()));
- if (owner_->heap()->inline_allocation_disabled()) {
- // Keep the linear allocation area empty if requested to do so, just
- // return area back to the free list instead.
- owner_->Free(new_node->address() + size_in_bytes, bytes_left);
- owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes);
- } else if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking &&
- !owner_->is_local()) { // Not needed on CompactionSpaces.
- size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
- // We don't want to give too large linear areas to the allocator while
- // incremental marking is going on, because we won't check again whether
- // we want to do another increment until the linear area is used up.
- DCHECK_GE(new_node_size, size_in_bytes + linear_size);
- owner_->Free(new_node->address() + size_in_bytes + linear_size,
- new_node_size - size_in_bytes - linear_size);
- owner_->SetAllocationInfo(
- new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
- } else {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ Address start = new_node->address();
+ Address end = new_node->address() + new_node_size;
+ Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
+ DCHECK_LE(limit, end);
+ DCHECK_LE(size_in_bytes, limit - start);
+ if (limit != end) {
+ owner_->Free(limit, end - limit);
}
+ owner_->SetAllocationInfo(start, limit);
- return new_node;
+ return true;
}
size_t FreeList::EvictFreeListItems(Page* page) {
@@ -3124,7 +3148,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
}
-HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -3134,30 +3158,30 @@ HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
// entries.
return free_list_.Allocate(size_in_bytes);
}
- return nullptr;
+ return false;
}
-HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->SweepAndRefill(this);
return free_list_.Allocate(size_in_bytes);
}
- return nullptr;
+ return false;
}
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
return RawSlowAllocateRaw(size_in_bytes);
}
-HeapObject* CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
+bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
return RawSlowAllocateRaw(size_in_bytes);
}
-HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
@@ -3175,17 +3199,13 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
- HeapObject* object =
- free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != NULL) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
if (locked_page_ != nullptr) {
DCHECK_EQ(locked_page_->owner()->identity(), identity());
collector->sweeper().ParallelSweepPage(locked_page_, identity());
locked_page_ = nullptr;
- HeapObject* object =
- free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != nullptr) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
// If sweeping is still in progress try to sweep pages.
@@ -3193,8 +3213,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
- object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != nullptr) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3203,9 +3222,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
- HeapObject* object =
- free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != nullptr) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
}
@@ -3347,14 +3364,15 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
- AllocationStep(object->address(), object_size);
-
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
-
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
+ AllocationStep(object_size, object->address(), object_size);
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(object));
return object;
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 4f4de139e4..d386d11425 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -22,7 +22,6 @@
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
-#include "src/list.h"
#include "src/objects.h"
#include "src/objects/map.h"
#include "src/utils.h"
@@ -355,7 +354,7 @@ class MemoryChunk {
+ kUIntptrSize // uintptr_t flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
- + 2 * kPointerSize // base::VirtualMemory reservation_
+ + 2 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
@@ -632,12 +631,12 @@ class MemoryChunk {
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
- base::VirtualMemory* reservation);
+ VirtualMemory* reservation);
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
- base::VirtualMemory* reserved_memory() { return &reservation_; }
+ VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
uintptr_t flags_;
@@ -647,7 +646,7 @@ class MemoryChunk {
Address area_end_;
// If the chunk needs to remember its memory reservation, it is stored here.
- base::VirtualMemory reservation_;
+ VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
@@ -904,17 +903,17 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
- AllocationObserver* observer);
+ void AddAllocationObserver(AllocationObserver* observer);
- V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
- AllocationObserver* observer);
+ void RemoveAllocationObserver(AllocationObserver* observer);
V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
- void AllocationStep(Address soon_object, int size);
+ V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
+
+ void AllocationStep(int bytes_since_last, Address soon_object, int size);
// Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers.
@@ -1004,7 +1003,9 @@ class MemoryChunkValidator {
class CodeRange {
public:
explicit CodeRange(Isolate* isolate);
- ~CodeRange() { TearDown(); }
+ ~CodeRange() {
+ if (virtual_memory_.IsReserved()) virtual_memory_.Release();
+ }
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
@@ -1055,25 +1056,21 @@ class CodeRange {
size_t size;
};
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
// If none can be found, returns false.
bool GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
- static int CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right);
+ static bool CompareFreeBlockAddress(const FreeBlock& left,
+ const FreeBlock& right);
bool ReserveBlock(const size_t requested_size, FreeBlock* block);
void ReleaseBlock(const FreeBlock* block);
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
- base::VirtualMemory virtual_memory_;
+ VirtualMemory virtual_memory_;
// The global mutex guards free_list_ and allocation_list_ as GC threads may
// access both lists concurrently to the main thread.
@@ -1082,12 +1079,12 @@ class CodeRange {
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
- List<FreeBlock> free_list_;
+ std::vector<FreeBlock> free_list_;
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
- List<FreeBlock> allocation_list_;
- int current_allocation_block_index_;
+ std::vector<FreeBlock> allocation_list_;
+ size_t current_allocation_block_index_;
DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
@@ -1348,14 +1345,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
Executability executable, Space* space);
Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
- base::VirtualMemory* controller);
+ VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
- void* hint, base::VirtualMemory* controller);
+ void* hint, VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
- void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+ void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
@@ -1381,8 +1378,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
- MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
- Address start, size_t commit_size,
+ MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
+ size_t commit_size,
size_t reserved_size);
CodeRange* code_range() { return code_range_; }
@@ -1445,7 +1442,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
base::AtomicValue<void*> lowest_ever_allocated_;
base::AtomicValue<void*> highest_ever_allocated_;
- base::VirtualMemory last_chunk_;
+ VirtualMemory last_chunk_;
Unmapper unmapper_;
friend class heap::TestCodeRangeScope;
@@ -1758,10 +1755,10 @@ class V8_EXPORT_PRIVATE FreeList {
// and the size should be a non-zero multiple of the word size.
size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
- // Allocate a block of size {size_in_bytes} from the free list. The block is
- // unitialized. A failure is returned if no block is available. The size
- // should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
+ // Finds a node of size at least size_in_bytes and sets up a linear allocation
+ // area using this node. Returns false if there is no such node and the caller
+ // has to retry allocation after collecting garbage.
+ MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
// Clear the free list.
void Reset();
@@ -2081,15 +2078,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
void ResetFreeList() { free_list_.Reset(); }
- // Set space allocation info.
- void SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(top, limit);
- }
-
- void SetAllocationInfo(Address top, Address limit);
+ void PauseAllocationObservers() override;
+ void ResumeAllocationObservers() override;
// Empty space allocation info, returning unused area to free list.
void EmptyAllocationInfo();
@@ -2194,6 +2184,21 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
 // multiple tasks hold locks on pages while trying to sweep each other's pages.
void AnnounceLockedPage(Page* page) { locked_page_ = page; }
+ Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
+ void SetAllocationInfo(Address top, Address limit);
+
+ private:
+ // Set space allocation info.
+ void SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.Reset(top, limit);
+ }
+ void DecreaseLimit(Address new_limit);
+ void StartNextInlineAllocationStep() override;
+ bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
+
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2210,26 +2215,33 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// size limit has been hit.
bool Expand();
- // Generic fast case allocation function that tries linear allocation at the
- // address denoted by top in allocation_info_.
+ // Sets up a linear allocation area that fits the given number of bytes.
+ // Returns false if there is not enough space and the caller has to retry
+ // after collecting garbage.
+ inline bool EnsureLinearAllocationArea(int size_in_bytes);
+ // Allocates an object from the linear allocation area. Assumes that the
+  // linear allocation area is large enough to fit the object.
inline HeapObject* AllocateLinearly(int size_in_bytes);
-
- // Generic fast case allocation function that tries aligned linear allocation
- // at the address denoted by top in allocation_info_. Writes the aligned
- // allocation size, which includes the filler size, to size_in_bytes.
- inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment);
-
+ // Tries to allocate an aligned object from the linear allocation area.
+ // Returns nullptr if the linear allocation area does not fit the object.
+ // Otherwise, returns the object pointer and writes the allocation size
+  // (object size + alignment filler size) to size_in_bytes.
+ inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
- // not successful, wait for the sweeper threads and re-try free-list
- // allocation.
- MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
- int size_in_bytes);
+ // not successful, wait for the sweeper threads and retry free-list
+ // allocation. Returns false if there is not enough space and the caller
+ // has to retry after collecting garbage.
+ MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
- // Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ // Slow path of AllocateRaw. This function is space-dependent. Returns false
+ // if there is not enough space and the caller has to retry after
+ // collecting garbage.
+ MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
- MUST_USE_RESULT HeapObject* RawSlowAllocateRaw(int size_in_bytes);
+ // Implementation of SlowAllocateRaw. Returns false if there is not enough
+ // space and the caller has to retry after collecting garbage.
+ MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
size_t area_size_;
@@ -2249,6 +2261,7 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
base::Mutex space_mutex_;
Page* locked_page_;
+ Address top_on_previous_step_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -2603,8 +2616,13 @@ class NewSpace : public Space {
return allocation_info_.limit();
}
- Address original_top() { return original_top_.Value(); }
+ void ResetOriginalTop() {
+ DCHECK_GE(top(), original_top());
+ DCHECK_LE(top(), original_limit());
+ original_top_.SetValue(top());
+ }
+ Address original_top() { return original_top_.Value(); }
Address original_limit() { return original_limit_.Value(); }
// Return the address of the first object in the active semispace.
@@ -2650,14 +2668,6 @@ class NewSpace : public Space {
UpdateInlineAllocationLimit(0);
}
- // Allows observation of inline allocation. The observer->Step() method gets
- // called after every step_size bytes have been allocated (approximately).
- // This works by adjusting the allocation limit to a lower value and adjusting
- // it after each step.
- void AddAllocationObserver(AllocationObserver* observer) override;
-
- void RemoveAllocationObserver(AllocationObserver* observer) override;
-
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
// same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2749,8 +2759,7 @@ class NewSpace : public Space {
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
- base::VirtualMemory reservation_;
-
+ VirtualMemory reservation_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2765,7 +2774,7 @@ class NewSpace : public Space {
// different when we cross a page boundary or reset the space.
void InlineAllocationStep(Address top, Address new_top, Address soon_object,
size_t size);
- void StartNextInlineAllocationStep();
+ void StartNextInlineAllocationStep() override;
friend class SemiSpaceIterator;
};
@@ -2794,10 +2803,9 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
- MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
- int size_in_bytes) override;
+ MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes) override;
+ MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
};
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 981aa76649..ccefd1a058 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -32,7 +32,7 @@ void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
&reservation)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
@@ -53,7 +53,7 @@ void StoreBuffer::SetUp() {
DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
- DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask);
}
if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 2c6142792a..75da76490e 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -208,7 +208,7 @@ class StoreBuffer {
// IN_GC mode.
StoreBufferMode mode_;
- base::VirtualMemory virtual_memory_;
+ VirtualMemory virtual_memory_;
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index f6530ec183..3421e16611 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -6,6 +6,7 @@
#define V8_HEAP_WORKLIST_
#include <cstddef>
+#include <utility>
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
@@ -168,6 +169,11 @@ class Worklist {
PublishPopSegmentToGlobal(task_id);
}
+ void MergeGlobalPool(Worklist* other) {
+ auto pair = other->global_pool_.Extract();
+ global_pool_.MergeList(pair.first, pair.second);
+ }
+
private:
FRIEND_TEST(WorkListTest, SegmentCreate);
FRIEND_TEST(WorkListTest, SegmentPush);
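MergeGlobalPool() added above builds on the Extract()/MergeList() pair introduced in the next hunk: the source worklist's segment list is detached under its own lock and then spliced in front of the destination's list under the destination's lock. A minimal standalone sketch of that splice pattern, assuming std::mutex in place of base::Mutex and a bare Segment node type:

#include <mutex>
#include <utility>

// Bare segment node; the real Worklist segments also carry a fixed-size buffer
// of objects.
struct Segment {
  Segment* next = nullptr;
};

// Lock-protected singly linked list of segments, mirroring the global pool:
// Extract() detaches the whole list, MergeList() splices a detached list in
// front of the current one.
class GlobalPool {
 public:
  std::pair<Segment*, Segment*> Extract() {
    Segment* head = nullptr;
    {
      std::lock_guard<std::mutex> guard(lock_);
      head = top_;
      top_ = nullptr;
    }
    if (head == nullptr) return {nullptr, nullptr};
    Segment* tail = head;
    while (tail->next != nullptr) tail = tail->next;  // walk to the list end
    return {head, tail};
  }

  void MergeList(Segment* start, Segment* end) {
    if (start == nullptr) return;
    std::lock_guard<std::mutex> guard(lock_);
    end->next = top_;  // splice the incoming list in front of the existing one
    top_ = start;
  }

  // Counterpart of Worklist::MergeGlobalPool(): steal everything from |other|.
  void MergeFrom(GlobalPool* other) {
    std::pair<Segment*, Segment*> pair = other->Extract();
    MergeList(pair.first, pair.second);
  }

 private:
  std::mutex lock_;
  Segment* top_ = nullptr;
};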
@@ -305,6 +311,28 @@ class Worklist {
}
}
+ std::pair<Segment*, Segment*> Extract() {
+ Segment* top = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
+ top = top_;
+ set_top(nullptr);
+ }
+ Segment* end = top;
+ while (end->next() != nullptr) end = end->next();
+ return std::make_pair(top, end);
+ }
+
+ void MergeList(Segment* start, Segment* end) {
+ if (start == nullptr) return;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ end->set_next(top_);
+ set_top(start);
+ }
+ }
+
private:
void set_top(Segment* segment) {
base::AsAtomicPointer::Relaxed_Store(&top_, segment);