Diffstat (limited to 'deps/v8/src/heap/spaces.cc')
-rw-r--r--  deps/v8/src/heap/spaces.cc  682
1 file changed, 402 insertions, 280 deletions
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index f23323c135..59ce145474 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -14,6 +14,7 @@
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
@@ -34,7 +35,7 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
- page_range_(space->anchor()->next_page(), space->anchor()),
+ page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {}
HeapObjectIterator::HeapObjectIterator(Page* page)
@@ -93,13 +94,15 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
// -----------------------------------------------------------------------------
// CodeRange
-CodeRange::CodeRange(Isolate* isolate)
+static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
+ LAZY_INSTANCE_INITIALIZER;
+
+CodeRange::CodeRange(Isolate* isolate, size_t requested)
: isolate_(isolate),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0) {}
-
-bool CodeRange::SetUp(size_t requested) {
+ current_allocation_block_index_(0),
+ requested_code_range_size_(0) {
DCHECK(!virtual_memory_.IsReserved());
if (requested == 0) {
@@ -109,7 +112,7 @@ bool CodeRange::SetUp(size_t requested) {
if (kRequiresCodeRange) {
requested = kMaximalCodeRangeSize;
} else {
- return true;
+ return;
}
}
@@ -124,11 +127,15 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+ requested_code_range_size_ = requested;
+
VirtualMemory reservation;
+ void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
if (!AlignedAllocVirtualMemory(
- requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()),
- GetRandomMmapAddr(), &reservation)) {
- return false;
+ requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()), hint,
+ &reservation)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "CodeRange setup: allocate virtual memory");
}
// We are sure that we have mapped a block of requested addresses.
@@ -140,7 +147,7 @@ bool CodeRange::SetUp(size_t requested) {
if (reserved_area > 0) {
if (!reservation.SetPermissions(base, reserved_area,
PageAllocator::kReadWrite))
- return false;
+ V8::FatalProcessOutOfMemory(isolate, "CodeRange setup: set permissions");
base += reserved_area;
}
@@ -153,7 +160,15 @@ bool CodeRange::SetUp(size_t requested) {
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
requested));
virtual_memory_.TakeControl(&reservation);
- return true;
+}
+
+CodeRange::~CodeRange() {
+ if (virtual_memory_.IsReserved()) {
+ Address addr = start();
+ virtual_memory_.Free();
+ code_range_address_hint.Pointer()->NotifyFreedCodeRange(
+ reinterpret_cast<void*>(addr), requested_code_range_size_);
+ }
}
bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
@@ -262,31 +277,38 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
free_list_.push_back(*block);
}
+void* CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return GetRandomMmapAddr();
+ }
+ void* result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(void* code_range_start,
+ size_t code_range_size) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-MemoryAllocator::MemoryAllocator(Isolate* isolate)
+MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
+ size_t code_range_size)
: isolate_(isolate),
code_range_(nullptr),
- capacity_(0),
+ capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
size_executable_(0),
lowest_ever_allocated_(static_cast<Address>(-1ll)),
highest_ever_allocated_(kNullAddress),
- unmapper_(isolate->heap(), this) {}
-
-bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
- capacity_ = ::RoundUp(capacity, Page::kPageSize);
-
- size_ = 0;
- size_executable_ = 0;
-
- code_range_ = new CodeRange(isolate_);
- if (!code_range_->SetUp(code_range_size)) return false;
-
- return true;
+ unmapper_(isolate->heap(), this) {
+ code_range_ = new CodeRange(isolate_, code_range_size);
}
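
The CodeRangeAddressHint introduced above caches, per requested size, the start
addresses of recently freed code ranges, so a later CodeRange of the same size
can try to reserve the same address again; only when nothing is cached does it
fall back to GetRandomMmapAddr(). CodeRange's constructor consumes the hint and
its destructor feeds NotifyFreedCodeRange, pairing each reservation with a
later hint. A minimal standalone sketch of that caching pattern, using std::
containers in place of V8's base::Mutex/LockGuard and a caller-supplied
fallback hint (illustrative names, not the V8 API), might look like:

#include <cstddef>
#include <map>
#include <mutex>
#include <vector>

class AddressHintCache {
 public:
  // Returns a previously freed start address for |size| if one is cached,
  // otherwise the caller-provided fallback (V8 uses a random mmap address).
  void* GetAddressHint(size_t size, void* fallback_hint) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto it = recently_freed_.find(size);
    if (it == recently_freed_.end() || it->second.empty()) return fallback_hint;
    void* result = it->second.back();
    it->second.pop_back();
    return result;
  }

  // Remembers the start address of a freed range so a later reservation of
  // the same size can attempt to reuse it.
  void NotifyFreedRange(void* start, size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    recently_freed_[size].push_back(start);
  }

 private:
  std::mutex mutex_;
  std::map<size_t, std::vector<void*>> recently_freed_;
};
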
@@ -294,7 +316,7 @@ void MemoryAllocator::TearDown() {
unmapper()->TearDown();
// Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_.Value(), 0u);
+ DCHECK_EQ(size_, 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK_EQ(0, size_executable_);
capacity_ = 0;
@@ -319,7 +341,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_.Decrement(1);
+ unmapper_->active_unmapping_tasks_--;
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
if (FLAG_trace_unmapper) {
PrintIsolate(unmapper_->heap_->isolate(),
@@ -350,9 +372,9 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
task->id());
}
DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_.Value(), 0);
- active_unmapping_tasks_.Increment(1);
+ DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_, 0);
+ active_unmapping_tasks_++;
task_ids_[pending_unmapping_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
@@ -368,7 +390,7 @@ void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
}
}
pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_.SetValue(0);
+ active_unmapping_tasks_ = 0;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -391,7 +413,7 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
- if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+ if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
// All previous unmapping tasks have been run to completion.
// Finalize those tasks to make room for new ones.
CancelAndWaitForPendingTasks();
@@ -449,6 +471,21 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
return static_cast<int>(result);
}
+size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ size_t sum = 0;
+ // kPooled chunks are already uncommitted. We only have to account for
+ // kRegular and kNonRegular chunks.
+ for (auto& chunk : chunks_[kRegular]) {
+ sum += chunk->size();
+ }
+ for (auto& chunk : chunks_[kNonRegular]) {
+ sum += chunk->size();
+ }
+ return sum;
+}
+
bool MemoryAllocator::CommitMemory(Address base, size_t size) {
if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
return false;
@@ -491,7 +528,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
}
Address result = reservation.address();
- size_.Increment(reservation.size());
+ size_ += reservation.size();
controller->TakeControl(&reservation);
return result;
}
@@ -523,7 +560,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
// Failed to commit the body. Free the mapping and any partially committed
// regions inside it.
reservation.Free();
- size_.Decrement(reserve_size);
+ size_ -= reserve_size;
return kNullAddress;
}
@@ -531,14 +568,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
-void Page::InitializeAsAnchor(Space* space) {
- set_owner(space);
- set_next_chunk(this);
- set_prev_chunk(this);
- SetFlags(0, static_cast<uintptr_t>(~0));
- SetFlag(ANCHOR);
-}
-
Heap* MemoryChunk::synchronized_heap() {
return reinterpret_cast<Heap*>(
base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
@@ -624,18 +653,21 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->invalidated_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
- chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
- chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
+ chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
+ chunk->set_concurrent_sweeping_state(kSweepingDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
- chunk->set_next_chunk(nullptr);
- chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
+ chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
+ 0;
+ chunk->external_backing_store_bytes_
+ [ExternalBackingStoreType::kExternalString] = 0;
+
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
chunk->categories_[i] = nullptr;
}
@@ -678,9 +710,10 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
- heap()->incremental_marking()->SetOldSpacePageFlags(page);
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
+ page->list_node().Initialize();
page->InitializationMemoryFence();
return page;
}
@@ -693,8 +726,9 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
Page* page = static_cast<Page*>(chunk);
- heap()->incremental_marking()->SetNewSpacePageFlags(page);
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateLocalTracker();
+ page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
@@ -714,19 +748,11 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
- // Initialize the owner field for each contained page (except the first, which
- // is initialized by MemoryChunk::Initialize).
- for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
- addr < chunk->area_end(); addr += Page::kPageSize) {
- // Clear out kPageHeaderTag.
- Memory::Address_at(addr) = 0;
- }
LargePage* page = static_cast<LargePage*>(chunk);
- page->InitializationMemoryFence();
+ page->list_node().Initialize();
return page;
}
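
A recurring change in this diff is the removal of the sentinel "anchor" page
and the per-chunk next_chunk()/prev_chunk() pointers in favour of an intrusive
list: every MemoryChunk carries a list_node(), and each space owns a
memory_chunk_list_ that is manipulated with PushBack(), PushFront(), Remove(),
front() and Empty(). The following is a minimal standalone sketch of such an
intrusive doubly linked list, showing only the core operations (illustrative
only, not V8's base::List implementation):

template <class T>
class List;  // forward declaration for the friend declaration below

template <class T>
class ListNode {
 public:
  T* next() const { return next_; }
  T* prev() const { return prev_; }
  void Initialize() { next_ = prev_ = nullptr; }

 private:
  template <class U>
  friend class List;
  T* next_ = nullptr;
  T* prev_ = nullptr;
};

// An element type only needs to expose its embedded node, e.g.:
//   struct Chunk {
//     ListNode<Chunk>& list_node() { return node_; }
//     ListNode<Chunk> node_;
//   };
template <class T>
class List {
 public:
  bool Empty() const { return front_ == nullptr; }
  T* front() const { return front_; }
  T* back() const { return back_; }

  void PushBack(T* element) {
    ListNode<T>& node = element->list_node();
    node.prev_ = back_;
    node.next_ = nullptr;
    if (back_ != nullptr) back_->list_node().next_ = element;
    back_ = element;
    if (front_ == nullptr) front_ = element;
  }

  void Remove(T* element) {
    ListNode<T>& node = element->list_node();
    if (node.prev_ != nullptr) node.prev_->list_node().next_ = node.next_;
    if (node.next_ != nullptr) node.next_->list_node().prev_ = node.prev_;
    if (front_ == element) front_ = node.next_;
    if (back_ == element) back_ = node.prev_;
    node.Initialize();
  }

 private:
  T* front_ = nullptr;
  T* back_ = nullptr;
};

Because the node is embedded in the element itself, Remove() is O(1) and no
dummy anchor element has to be kept alive just to terminate the list, which is
what lets the surrounding hunks drop the anchor bookkeeping.
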
@@ -753,7 +779,7 @@ void Page::ReleaseFreeListCategories() {
}
Page* Page::ConvertNewToOld(Page* old_page) {
- DCHECK(!old_page->is_anchor());
+ DCHECK(old_page);
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
@@ -766,32 +792,13 @@ Page* Page::ConvertNewToOld(Page* old_page) {
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
- return high_water_mark_.Value();
+ return high_water_mark_;
}
bool MemoryChunk::IsPagedSpace() const {
return owner()->identity() != LO_SPACE;
}
-void MemoryChunk::InsertAfter(MemoryChunk* other) {
- MemoryChunk* other_next = other->next_chunk();
-
- set_next_chunk(other_next);
- set_prev_chunk(other);
- other_next->set_prev_chunk(this);
- other->set_next_chunk(this);
-}
-
-
-void MemoryChunk::Unlink() {
- MemoryChunk* next_element = next_chunk();
- MemoryChunk* prev_element = prev_chunk();
- next_element->set_prev_chunk(prev_element);
- prev_element->set_next_chunk(next_element);
- set_prev_chunk(nullptr);
- set_next_chunk(nullptr);
-}
-
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -857,21 +864,21 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
DCHECK(IsAligned(base, MemoryChunk::kAlignment));
if (base == kNullAddress) return nullptr;
- size_.Increment(chunk_size);
+ size_ += chunk_size;
// Update executable memory size.
- size_executable_.Increment(chunk_size);
+ size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
address_hint, &reservation);
if (base == kNullAddress) return nullptr;
// Update executable memory size.
- size_executable_.Increment(reservation.size());
+ size_executable_ += reservation.size();
}
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+ ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
+ ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
}
area_start = base + CodePageAreaStartOffset();
@@ -889,7 +896,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if (base == kNullAddress) return nullptr;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+ ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
}
area_start = base + Page::kObjectStartOffset;
@@ -911,9 +918,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
CHECK(!last_chunk_.IsReserved());
last_chunk_.TakeControl(&reservation);
UncommitBlock(last_chunk_.address(), last_chunk_.size());
- size_.Decrement(chunk_size);
+ size_ -= chunk_size;
if (executable == EXECUTABLE) {
- size_executable_.Decrement(chunk_size);
+ size_executable_ -= chunk_size;
}
CHECK(last_chunk_.IsReserved());
return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -928,8 +935,36 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
return chunk;
}
+void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
+void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
+void Page::AllocateLocalTracker() {
+ DCHECK_NULL(local_tracker_);
+ local_tracker_ = new LocalArrayBufferTracker(this);
+}
+
+bool Page::contains_array_buffers() {
+ return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
+}
+
void Page::ResetFreeListStatistics() {
wasted_memory_ = 0;
}
@@ -1042,8 +1077,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
// partially starting at |start_free| will also release the potentially
// unused part behind the current page.
const size_t released_bytes = reservation->Release(start_free);
- DCHECK_GE(size_.Value(), released_bytes);
- size_.Decrement(released_bytes);
+ DCHECK_GE(size_, released_bytes);
+ size_ -= released_bytes;
isolate_->counters()->memory_allocated()->Decrement(
static_cast<int>(released_bytes));
}
@@ -1058,12 +1093,12 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
- DCHECK_GE(size_.Value(), static_cast<size_t>(size));
- size_.Decrement(size);
+ DCHECK_GE(size_, static_cast<size_t>(size));
+ size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
if (chunk->executable() == EXECUTABLE) {
- DCHECK_GE(size_executable_.Value(), size);
- size_executable_.Decrement(size);
+ DCHECK_GE(size_executable_, size);
+ size_executable_ -= size;
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -1172,7 +1207,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, &reservation);
- size_.Increment(size);
+ size_ += size;
return chunk;
}
@@ -1180,7 +1215,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size) {
if (!CommitMemory(start, size)) return false;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size);
+ ZapBlock(start, size, kZapValue);
}
isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
@@ -1194,10 +1229,12 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
return true;
}
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
+void MemoryAllocator::ZapBlock(Address start, size_t size,
+ uintptr_t zap_value) {
+ DCHECK_EQ(start % kPointerSize, 0);
+ DCHECK_EQ(size % kPointerSize, 0);
for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = static_cast<Address>(kZapValue);
+ Memory::Address_at(start + s) = static_cast<Address>(zap_value);
}
}
@@ -1272,10 +1309,6 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-bool MemoryChunk::contains_array_buffers() {
- return local_tracker() != nullptr && !local_tracker()->IsEmpty();
-}
-
void MemoryChunk::ReleaseAllocatedMemory() {
if (skip_list_ != nullptr) {
delete skip_list_;
@@ -1393,11 +1426,6 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
-void MemoryChunk::AllocateLocalTracker() {
- DCHECK_NULL(local_tracker_);
- local_tracker_ = new LocalArrayBufferTracker(owner());
-}
-
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
@@ -1415,6 +1443,19 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
+void MemoryChunk::IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+ external_backing_store_bytes_[type] += amount;
+ owner()->IncrementExternalBackingStoreBytes(type, amount);
+}
+
+void MemoryChunk::DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+ DCHECK_GE(external_backing_store_bytes_[type], amount);
+ external_backing_store_bytes_[type] -= amount;
+ owner()->DecrementExternalBackingStoreBytes(type, amount);
+}
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1464,25 +1505,17 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : SpaceWithLinearArea(heap, space), executable_(executable), anchor_(this) {
+ : SpaceWithLinearArea(heap, space), executable_(executable) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
}
-
-bool PagedSpace::SetUp() { return true; }
-
-
-bool PagedSpace::HasBeenSetUp() { return true; }
-
-
void PagedSpace::TearDown() {
- for (auto it = begin(); it != end();) {
- Page* page = *(it++); // Will be erased.
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
}
- anchor_.set_next_page(&anchor_);
- anchor_.set_prev_page(&anchor_);
accounting_stats_.Clear();
}
@@ -1526,8 +1559,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
- // anchor_
-
other->FreeLinearAllocationArea();
// The linear allocation area of {other} should be destroyed now.
@@ -1610,20 +1641,28 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
- page->InsertAfter(anchor()->prev_page());
+ memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
IncreaseCapacity(page->area_size());
IncreaseAllocatedBytes(page->allocated_bytes(), page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
return RelinkFreeListCategories(page);
}
void PagedSpace::RemovePage(Page* page) {
CHECK(page->SweepingDone());
- page->Unlink();
+ memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
DecreaseAllocatedBytes(page->allocated_bytes(), page);
DecreaseCapacity(page->area_size());
AccountUncommitted(page->size());
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
}
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
@@ -1668,7 +1707,6 @@ bool PagedSpace::Expand() {
AddPage(page);
Free(page->area_start(), page->area_size(),
SpaceAccountingMode::kSpaceAccounted);
- DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
return true;
}
@@ -1815,11 +1853,6 @@ void PagedSpace::ReleasePage(Page* page) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
- // If page is still in a list, unlink it from that list.
- if (page->next_chunk() != nullptr) {
- DCHECK_NOT_NULL(page->prev_chunk());
- page->Unlink();
- }
AccountUncommitted(page->size());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
@@ -1904,11 +1937,23 @@ void PagedSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
-void PagedSpace::Verify(ObjectVisitor* visitor) {
+void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
+ size_t external_space_bytes[kNumTypes];
+ size_t external_page_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
for (Page* page : *this) {
CHECK(page->owner() == this);
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
@@ -1916,6 +1961,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
+
for (HeapObject* object = it.Next(); object != nullptr;
object = it.Next()) {
CHECK(end_of_previous_object <= object->address());
@@ -1931,7 +1977,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
VerifyObject(object);
// The object itself should look OK.
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
@@ -1942,8 +1988,25 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
object->IterateBody(map, size, visitor);
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
+
+ if (object->IsJSArrayBuffer()) {
+ JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size = NumberToSize(array_buffer->byte_length());
+ external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
+ external_space_bytes[t] += external_page_bytes[t];
}
}
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
CHECK(allocation_pointer_found_in_space);
#ifdef DEBUG
VerifyCountersAfterSweeping();
@@ -2021,24 +2084,25 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
// -----------------------------------------------------------------------------
// NewSpace implementation
-bool NewSpace::SetUp(size_t initial_semispace_capacity,
- size_t maximum_semispace_capacity) {
- DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo(
- static_cast<uint32_t>(maximum_semispace_capacity)));
-
- to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
- from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
+ size_t max_semispace_capacity)
+ : SpaceWithLinearArea(heap, NEW_SPACE),
+ to_space_(heap, kToSpace),
+ from_space_(heap, kFromSpace),
+ reservation_() {
+ DCHECK(initial_semispace_capacity <= max_semispace_capacity);
+ DCHECK(
+ base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
+
+ to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
+ from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
if (!to_space_.Commit()) {
- return false;
+ V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
ResetLinearAllocationArea();
-
- return true;
}
-
void NewSpace::TearDown() {
allocation_info_.Reset(kNullAddress, kNullAddress);
@@ -2101,23 +2165,29 @@ bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
const int expected_pages =
static_cast<int>(current_capacity_ / Page::kPageSize);
+ MemoryChunk* current_page = first_page();
int actual_pages = 0;
- Page* current_page = anchor()->next_page();
- while (current_page != anchor()) {
+
+ // First iterate through the page list, up to expected_pages entries, if
+ // that many pages exist.
+ while (current_page != nullptr && actual_pages < expected_pages) {
actual_pages++;
- current_page = current_page->next_page();
- if (actual_pages > expected_pages) {
- Page* to_remove = current_page->prev_page();
- // Make sure we don't overtake the actual top pointer.
- CHECK_NE(to_remove, current_page_);
- to_remove->Unlink();
- // Clear new space flags to avoid this page being treated as a new
- // space page that is potentially being swept.
- to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- to_remove);
- }
+ current_page = current_page->list_node().next();
+ }
+
+ // Free all overallocated pages which are behind current_page.
+ while (current_page) {
+ MemoryChunk* next_current = current_page->list_node().next();
+ memory_chunk_list_.Remove(current_page);
+ // Clear new space flags to avoid this page being treated as a new
+ // space page that is potentially being swept.
+ current_page->SetFlags(0, Page::kIsInNewSpaceMask);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+ current_page);
+ current_page = next_current;
}
+
+ // Add more pages if we have less than expected_pages.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
@@ -2127,9 +2197,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
Page::kAllocatableMemory, this, NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
- current_page->InsertAfter(anchor());
+ memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
- current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+ current_page->SetFlags(first_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
@@ -2191,8 +2261,8 @@ void NewSpace::UpdateLinearAllocationArea() {
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
- original_top_.SetValue(top());
- original_limit_.SetValue(limit());
+ original_top_ = top();
+ original_limit_ = limit();
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -2285,6 +2355,10 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
+size_t LargeObjectSpace::Available() {
+ return ObjectSizeFor(heap()->memory_allocator()->Available());
+}
+
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -2358,7 +2432,7 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify() {
+void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -2367,6 +2441,11 @@ void NewSpace::Verify() {
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
+ size_t external_space_bytes[kNumTypes];
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
@@ -2387,23 +2466,34 @@ void NewSpace::Verify() {
CHECK(!object->IsAbstractCode());
// The object itself should look OK.
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
+ VerifyPointersVisitor visitor(heap());
int size = object->Size();
object->IterateBody(map, size, &visitor);
+ if (object->IsJSArrayBuffer()) {
+ JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size = NumberToSize(array_buffer->byte_length());
+ external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+
current += size;
} else {
// At end of page, switch to next page.
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
- // Next page should be valid.
- CHECK(!page->is_anchor());
current = page->area_start();
}
}
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
@@ -2435,18 +2525,16 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
DCHECK(!is_committed());
- Page* current = anchor();
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
Page::kAllocatableMemory, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
- RewindPages(current, pages_added);
+ if (pages_added) RewindPages(pages_added);
return false;
}
- new_page->InsertAfter(current);
- current = new_page;
+ memory_chunk_list_.PushBack(new_page);
}
Reset();
AccountCommitted(current_capacity_);
@@ -2460,12 +2548,12 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
- for (auto it = begin(); it != end();) {
- Page* p = *(it++);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
}
- anchor()->set_next_page(anchor());
- anchor()->set_prev_page(anchor());
+ current_page_ = nullptr;
AccountUncommitted(current_capacity_);
committed_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
@@ -2492,8 +2580,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
- Page* last_page = anchor()->prev_page();
- DCHECK_NE(last_page, anchor());
+ DCHECK(last_page());
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
@@ -2501,29 +2588,26 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
Page::kAllocatableMemory, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
- RewindPages(last_page, pages_added);
+ if (pages_added) RewindPages(pages_added);
return false;
}
- new_page->InsertAfter(last_page);
+ memory_chunk_list_.PushBack(new_page);
marking_state->ClearLiveness(new_page);
// Duplicate the flags that were set on the old page.
- new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
- last_page = new_page;
+ new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
}
AccountCommitted(delta);
current_capacity_ = new_capacity;
return true;
}
-void SemiSpace::RewindPages(Page* start, int num_pages) {
- Page* new_last_page = nullptr;
- Page* last_page = start;
+void SemiSpace::RewindPages(int num_pages) {
+ DCHECK_GT(num_pages, 0);
+ DCHECK(last_page());
while (num_pages > 0) {
- DCHECK_NE(last_page, anchor());
- new_last_page = last_page->prev_page();
- last_page->prev_page()->set_next_page(last_page->next_page());
- last_page->next_page()->set_prev_page(last_page->prev_page());
- last_page = new_last_page;
+ MemoryChunk* last = last_page();
+ memory_chunk_list_.Remove(last);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
num_pages--;
}
}
@@ -2534,19 +2618,9 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, AllocatePageSize()));
+ DCHECK(IsAligned(delta, Page::kPageSize));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
- Page* new_last_page;
- Page* last_page;
- while (delta_pages > 0) {
- last_page = anchor()->prev_page();
- new_last_page = last_page->prev_page();
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- last_page);
- delta_pages--;
- }
+ RewindPages(delta_pages);
AccountUncommitted(delta);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
@@ -2555,10 +2629,6 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
}
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
- anchor_.set_owner(this);
- anchor_.prev_page()->set_next_page(&anchor_);
- anchor_.next_page()->set_prev_page(&anchor_);
-
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
@@ -2579,30 +2649,41 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
void SemiSpace::Reset() {
- DCHECK_NE(anchor_.next_page(), &anchor_);
- current_page_ = anchor_.next_page();
+ DCHECK(first_page());
+ DCHECK(last_page());
+ current_page_ = first_page();
pages_used_ = 0;
}
void SemiSpace::RemovePage(Page* page) {
if (current_page_ == page) {
- current_page_ = page->prev_page();
+ if (page->prev_page()) {
+ current_page_ = page->prev_page();
+ }
+ }
+ memory_chunk_list_.Remove(page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
- page->Unlink();
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
page->set_owner(this);
- page->InsertAfter(anchor());
+ memory_chunk_list_.PushFront(page);
pages_used_++;
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
- DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
- DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
+ DCHECK(from->first_page());
+ DCHECK(to->first_page());
intptr_t saved_to_space_flags = to->current_page()->GetFlags();
@@ -2612,8 +2693,10 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->minimum_capacity_, to->minimum_capacity_);
std::swap(from->age_mark_, to->age_mark_);
std::swap(from->committed_, to->committed_);
- std::swap(from->anchor_, to->anchor_);
+ std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
std::swap(from->current_page_, to->current_page_);
+ std::swap(from->external_backing_store_bytes_,
+ to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(0, 0);
@@ -2640,9 +2723,13 @@ void SemiSpace::Print() {}
#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
- Page* page = anchor_.next_page();
- CHECK(anchor_.owner() == this);
- while (page != &anchor_) {
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
@@ -2660,8 +2747,17 @@ void SemiSpace::Verify() {
!page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
}
}
- CHECK_EQ(page->prev_page()->next_page(), page);
- page = page->next_page();
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
+ }
+
+ CHECK_IMPLIES(page->list_node().prev(),
+ page->list_node().prev()->list_node().next() == page);
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
@@ -2681,8 +2777,8 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
} else {
while (page != end_page) {
page = page->next_page();
- DCHECK_NE(page, space->anchor());
}
+ DCHECK(page);
}
}
#endif
@@ -2692,7 +2788,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space->bottom(), space->top());
+ Initialize(space->first_allocatable_address(), space->top());
}
@@ -2786,9 +2882,9 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
while (n != nullptr) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == nullptr) {
- *map_location = heap->free_space_map();
+ *map_location = ReadOnlyRoots(heap).free_space_map();
} else {
- DCHECK(*map_location == heap->free_space_map());
+ DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
}
n = n->next();
}
@@ -2823,7 +2919,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
// Blocks have to be a minimum size to hold free list items.
if (size_in_bytes < kMinBlockSize) {
page->add_wasted_memory(size_in_bytes);
- wasted_bytes_.Increment(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
@@ -2995,7 +3091,7 @@ size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace* cur = top();
while (cur != nullptr) {
- DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
+ DCHECK(cur->map() == page()->heap()->root(Heap::kFreeSpaceMapRootIndex));
sum += cur->relaxed_read_size();
cur = cur->next();
}
@@ -3184,9 +3280,8 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif
-ReadOnlySpace::ReadOnlySpace(Heap* heap, AllocationSpace id,
- Executability executable)
- : PagedSpace(heap, id, executable),
+ReadOnlySpace::ReadOnlySpace(Heap* heap)
+ : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}
@@ -3269,7 +3364,7 @@ void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
// LargeObjectIterator
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
- current_ = space->first_page_;
+ current_ = space->first_page();
}
@@ -3285,33 +3380,27 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
+LargeObjectSpace::LargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, LO_SPACE) {}
+
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id), // Managed on a per-allocation basis
- first_page_(nullptr),
+ : Space(heap, id),
size_(0),
page_count_(0),
objects_size_(0),
chunk_map_(1024) {}
-LargeObjectSpace::~LargeObjectSpace() {}
-
-bool LargeObjectSpace::SetUp() {
- return true;
-}
-
void LargeObjectSpace::TearDown() {
- while (first_page_ != nullptr) {
- LargePage* page = first_page_;
- first_page_ = first_page_->next_page();
+ while (!memory_chunk_list_.Empty()) {
+ LargePage* page = first_page();
LOG(heap()->isolate(),
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
+ memory_chunk_list_.Remove(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
- SetUp();
}
-
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
@@ -3321,17 +3410,35 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
+ LargePage* page = AllocateLargePage(object_size, executable);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ HeapObject* object = page->GetObject();
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ if (heap()->incremental_marking()->black_allocation()) {
+ heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+ }
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(object));
+ page->InitializationMemoryFence();
+ return object;
+}
+
+LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
+ Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
- page->set_next_page(first_page_);
- first_page_ = page;
+ memory_chunk_list_.PushBack(page);
InsertChunkMapEntries(page);
@@ -3341,23 +3448,13 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
// Make the object consistent so the heap can be verified in OldSpaceStep.
// We only need to do this in debug builds or if verify_heap is on.
reinterpret_cast<Object**>(object->address())[0] =
- heap()->fixed_array_map();
+ ReadOnlyRoots(heap()).fixed_array_map();
reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
-
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
- if (heap()->incremental_marking()->black_allocation()) {
- heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
- }
AllocationStep(object_size, object->address(), object_size);
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(object));
- return object;
+ return page;
}
@@ -3437,12 +3534,12 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = nullptr;
- LargePage* current = first_page_;
+ LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
objects_size_ = 0;
- while (current != nullptr) {
+ while (current) {
+ LargePage* next_current = current->next_page();
HeapObject* object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
@@ -3462,26 +3559,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
- previous = current;
- current = current->next_page();
} else {
- LargePage* page = current;
- // Cut the chunk out from the chunk list.
- current = current->next_page();
- if (previous == nullptr) {
- first_page_ = current;
- } else {
- previous->set_next_page(current);
- }
+ memory_chunk_list_.Remove(current);
// Free the chunk.
- size_ -= static_cast<int>(page->size());
- AccountUncommitted(page->size());
+ size_ -= static_cast<int>(current->size());
+ AccountUncommitted(current->size());
page_count_--;
- RemoveChunkMapEntries(page);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ RemoveChunkMapEntries(current);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
+ current);
}
+ current = next_current;
}
}
@@ -3504,8 +3594,14 @@ std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify() {
- for (LargePage* chunk = first_page_; chunk != nullptr;
+void LargeObjectSpace::Verify(Isolate* isolate) {
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
@@ -3527,10 +3623,10 @@ void LargeObjectSpace::Verify() {
object->IsWeakFixedArray() || object->IsWeakArrayList() ||
object->IsPropertyArray() || object->IsByteArray() ||
object->IsFeedbackVector() || object->IsBigInt() ||
- object->IsFreeSpace());
+ object->IsFreeSpace() || object->IsFeedbackMetadata());
// The object itself should look OK.
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
@@ -3538,7 +3634,7 @@ void LargeObjectSpace::Verify() {
// Byte arrays and strings don't have interior pointers.
if (object->IsAbstractCode()) {
- VerifyPointersVisitor code_visitor;
+ VerifyPointersVisitor code_visitor(heap());
object->IterateBody(map, object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
FixedArray* array = FixedArray::cast(object);
@@ -3561,13 +3657,21 @@ void LargeObjectSpace::Verify() {
}
}
}
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
#ifdef DEBUG
void LargeObjectSpace::Print() {
- OFStream os(stdout);
+ StdoutStream os;
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
obj->Print(os);
@@ -3577,7 +3681,7 @@ void LargeObjectSpace::Print() {
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
- AllocationSpaceName(this->owner()->identity()));
+ this->owner()->name());
printf(" --------------------------------------\n");
HeapObjectIterator objects(this);
unsigned mark_size = 0;
@@ -3598,5 +3702,23 @@ void Page::Print() {
}
#endif // DEBUG
+
+NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, NEW_LO_SPACE) {}
+
+AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+ // TODO(hpayer): Add heap growing strategy here.
+ LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->SetFlag(MemoryChunk::IN_TO_SPACE);
+ page->InitializationMemoryFence();
+ return page->GetObject();
+}
+
+size_t NewLargeObjectSpace::Available() {
+ // TODO(hpayer): Update as soon as we have a growing strategy.
+ return 0;
+}
} // namespace internal
} // namespace v8