Diffstat (limited to 'deps/v8/src/heap/spaces.cc')
-rw-r--r-- | deps/v8/src/heap/spaces.cc | 1165
1 file changed, 585 insertions, 580 deletions
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc index 8a7fd1a14f..95d5687a8f 100644 --- a/deps/v8/src/heap/spaces.cc +++ b/deps/v8/src/heap/spaces.cc @@ -4,13 +4,18 @@ #include "src/heap/spaces.h" +#include <utility> + #include "src/base/bits.h" #include "src/base/platform/platform.h" +#include "src/base/platform/semaphore.h" #include "src/full-codegen/full-codegen.h" +#include "src/heap/array-buffer-tracker.h" #include "src/heap/slot-set.h" #include "src/macro-assembler.h" #include "src/msan.h" #include "src/snapshot/snapshot.h" +#include "src/v8.h" namespace v8 { namespace internal { @@ -19,51 +24,37 @@ namespace internal { // ---------------------------------------------------------------------------- // HeapObjectIterator -HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { - // You can't actually iterate over the anchor page. It is not a real page, - // just an anchor for the double linked page list. Initialize as if we have - // reached the end of the anchor page, then the first iteration will move on - // to the first page. - Initialize(space, NULL, NULL, kAllPagesInSpace); -} - - -HeapObjectIterator::HeapObjectIterator(Page* page) { +HeapObjectIterator::HeapObjectIterator(PagedSpace* space) + : cur_addr_(nullptr), + cur_end_(nullptr), + space_(space), + page_range_(space->anchor()->next_page(), space->anchor()), + current_page_(page_range_.begin()) {} + +HeapObjectIterator::HeapObjectIterator(Page* page) + : cur_addr_(nullptr), + cur_end_(nullptr), + space_(reinterpret_cast<PagedSpace*>(page->owner())), + page_range_(page), + current_page_(page_range_.begin()) { +#ifdef DEBUG Space* owner = page->owner(); DCHECK(owner == page->heap()->old_space() || owner == page->heap()->map_space() || owner == page->heap()->code_space()); - Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(), - page->area_end(), kOnePageOnly); - DCHECK(page->SweepingDone()); -} - - -void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end, - HeapObjectIterator::PageMode mode) { - space_ = space; - cur_addr_ = cur; - cur_end_ = end; - page_mode_ = mode; +#endif // DEBUG } - // We have hit the end of the page and should advance to the next block of // objects. This happens at the end of the page. 
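Annotation: the rewritten HeapObjectIterator constructors drop the Initialize/anchor bookkeeping and instead hold a half-open page range (page_range_) that is walked through current_page_. Below is a minimal, hedged sketch of that iteration pattern over a sentinel-anchored circular page list; PageNode, PageRange and InsertAfter are simplified stand-ins for the example, not the real V8 types.

#include <cassert>
#include <cstdio>

// Simplified page node linked into a circular list with a sentinel "anchor"
// (assumption: reduced stand-in for MemoryChunk/Page).
struct PageNode {
  PageNode* next = this;
  PageNode* prev = this;
  int id = -1;  // -1 marks the anchor sentinel.
};

// Half-open range [begin, end): end is the anchor, so iteration stops when
// the walk wraps back around to it.
struct PageRange {
  PageNode* begin_;
  PageNode* end_;
  struct iterator {
    PageNode* p;
    PageNode* operator*() const { return p; }
    iterator& operator++() { p = p->next; return *this; }
    bool operator!=(const iterator& other) const { return p != other.p; }
  };
  iterator begin() const { return {begin_}; }
  iterator end() const { return {end_}; }
};

void InsertAfter(PageNode* pos, PageNode* page) {
  page->next = pos->next;
  page->prev = pos;
  pos->next->prev = page;
  pos->next = page;
}

int main() {
  PageNode anchor;
  PageNode pages[3];
  for (int i = 0; i < 3; i++) {
    pages[i].id = i;
    InsertAfter(anchor.prev, &pages[i]);  // append at the tail
  }
  // Equivalent of PageRange(space->anchor()->next_page(), space->anchor()).
  PageRange range{anchor.next, &anchor};
  for (PageNode* p : range) {
    std::printf("visiting page %d\n", p->id);
  }
  return 0;
}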
bool HeapObjectIterator::AdvanceToNextPage() { - DCHECK(cur_addr_ == cur_end_); - if (page_mode_ == kOnePageOnly) return false; - Page* cur_page; - if (cur_addr_ == NULL) { - cur_page = space_->anchor(); - } else { - cur_page = Page::FromAddress(cur_addr_ - 1); - DCHECK(cur_addr_ == cur_page->area_end()); - } - cur_page = cur_page->next_page(); - if (cur_page == space_->anchor()) return false; - cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted( - cur_page); + DCHECK_EQ(cur_addr_, cur_end_); + if (current_page_ == page_range_.end()) return false; + Page* cur_page = *(current_page_++); + space_->heap() + ->mark_compact_collector() + ->sweeper() + .SweepOrWaitUntilSweepingCompleted(cur_page); cur_addr_ = cur_page->area_start(); cur_end_ = cur_page->area_end(); DCHECK(cur_page->SweepingDone()); @@ -115,15 +106,16 @@ bool CodeRange::SetUp(size_t requested) { requested = kMinimumCodeRangeSize; } + const size_t reserved_area = + kReservedCodeRangePages * base::OS::CommitPageSize(); + if (requested < (kMaximalCodeRangeSize - reserved_area)) + requested += reserved_area; + DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); -#ifdef V8_TARGET_ARCH_MIPS64 - // To use pseudo-relative jumps such as j/jal instructions which have 28-bit - // encoded immediate, the addresses have to be in range of 256Mb aligned - // region. - code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize); -#else - code_range_ = new base::VirtualMemory(requested); -#endif + + code_range_ = new base::VirtualMemory( + requested, Max(kCodeRangeAreaAlignment, + static_cast<size_t>(base::OS::AllocateAlignment()))); CHECK(code_range_ != NULL); if (!code_range_->IsReserved()) { delete code_range_; @@ -137,18 +129,16 @@ bool CodeRange::SetUp(size_t requested) { // On some platforms, specifically Win64, we need to reserve some pages at // the beginning of an executable space. 
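Annotation: the CodeRange::SetUp change folds the Win64 reserved pages into a single reserved_area value that is added to the requested reservation up front and subtracted again when the usable block is computed. A small worked sketch of that arithmetic follows; the page size, alignment and addresses are made-up illustrative values, while the real ones come from base::OS and the kReservedCodeRangePages / kMaximalCodeRangeSize constants.

#include <cstdint>
#include <cstdio>

// Assumed illustrative constants; the real values are platform dependent.
constexpr size_t kCommitPageSize = 4096;
constexpr size_t kReservedCodeRangePages = 1;          // e.g. Win64 reserves one page
constexpr size_t kMaximalCodeRangeSize = 256u * 1024 * 1024;
constexpr size_t kAlignment = 1u << 19;                // MemoryChunk::kAlignment-like

size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  size_t requested = 128u * 1024 * 1024;
  const size_t reserved_area = kReservedCodeRangePages * kCommitPageSize;

  // Grow the reservation so the reserved front pages do not eat into the
  // usable area, but never beyond the maximal code range size.
  if (requested < kMaximalCodeRangeSize - reserved_area) requested += reserved_area;

  // Pretend the OS handed back this reservation base (example address only).
  uintptr_t base = 0x10000000;
  const size_t reservation_size = requested;

  // Commit and skip the reserved pages at the front, then align.
  base += reserved_area;
  uintptr_t aligned_base = RoundUpTo(base, kAlignment);

  // Usable block: total reservation minus alignment slack minus reserved pages.
  size_t usable = reservation_size - (aligned_base - base) - reserved_area;
  std::printf("reserved %zu bytes, usable block %zu bytes at %#lx\n",
              reserved_area, usable, (unsigned long)aligned_base);
  return 0;
}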
- if (kReservedCodeRangePages) { - if (!code_range_->Commit( - base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) { + if (reserved_area > 0) { + if (!code_range_->Commit(base, reserved_area, true)) { delete code_range_; code_range_ = NULL; return false; } - base += kReservedCodeRangePages * base::OS::CommitPageSize(); + base += reserved_area; } Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); - size_t size = code_range_->size() - (aligned_base - base) - - kReservedCodeRangePages * base::OS::CommitPageSize(); + size_t size = code_range_->size() - (aligned_base - base) - reserved_area; allocation_list_.Add(FreeBlock(aligned_base, size)); current_allocation_block_index_ = 0; @@ -222,7 +212,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size, *allocated = current.size; DCHECK(*allocated <= current.size); DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); - if (!isolate_->memory_allocator()->CommitExecutableMemory( + if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory( code_range_, current.start, commit_size, *allocated)) { *allocated = 0; ReleaseBlock(¤t); @@ -233,7 +223,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size, bool CodeRange::CommitRawMemory(Address start, size_t length) { - return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); + return isolate_->heap()->memory_allocator()->CommitMemory(start, length, + EXECUTABLE); } @@ -294,15 +285,17 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) { MemoryAllocator::MemoryAllocator(Isolate* isolate) : isolate_(isolate), + code_range_(nullptr), capacity_(0), capacity_executable_(0), size_(0), size_executable_(0), lowest_ever_allocated_(reinterpret_cast<void*>(-1)), - highest_ever_allocated_(reinterpret_cast<void*>(0)) {} - + highest_ever_allocated_(reinterpret_cast<void*>(0)), + unmapper_(this) {} -bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { +bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable, + intptr_t code_range_size) { capacity_ = RoundUp(capacity, Page::kPageSize); capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); DCHECK_GE(capacity_, capacity_executable_); @@ -310,21 +303,103 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { size_ = 0; size_executable_ = 0; + code_range_ = new CodeRange(isolate_); + if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false; + return true; } void MemoryAllocator::TearDown() { - for (MemoryChunk* chunk : chunk_pool_) { + unmapper()->WaitUntilCompleted(); + + MemoryChunk* chunk = nullptr; + while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) { FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize, NOT_EXECUTABLE); } + // Check that spaces were torn down before MemoryAllocator. DCHECK_EQ(size_.Value(), 0); // TODO(gc) this will be true again when we fix FreeMemory. // DCHECK(size_executable_ == 0); capacity_ = 0; capacity_executable_ = 0; + + if (last_chunk_.IsReserved()) { + last_chunk_.Release(); + } + + delete code_range_; + code_range_ = nullptr; +} + +class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task { + public: + explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {} + + private: + // v8::Task overrides. 
+ void Run() override { + unmapper_->PerformFreeMemoryOnQueuedChunks(); + unmapper_->pending_unmapping_tasks_semaphore_.Signal(); + } + + Unmapper* unmapper_; + DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask); +}; + +void MemoryAllocator::Unmapper::FreeQueuedChunks() { + ReconsiderDelayedChunks(); + if (FLAG_concurrent_sweeping) { + V8::GetCurrentPlatform()->CallOnBackgroundThread( + new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask); + concurrent_unmapping_tasks_active_++; + } else { + PerformFreeMemoryOnQueuedChunks(); + } +} + +bool MemoryAllocator::Unmapper::WaitUntilCompleted() { + bool waited = false; + while (concurrent_unmapping_tasks_active_ > 0) { + pending_unmapping_tasks_semaphore_.Wait(); + concurrent_unmapping_tasks_active_--; + waited = true; + } + return waited; +} + +void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() { + MemoryChunk* chunk = nullptr; + // Regular chunks. + while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) { + bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED); + allocator_->PerformFreeMemory(chunk); + if (pooled) AddMemoryChunkSafe<kPooled>(chunk); + } + // Non-regular chunks. + while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) { + allocator_->PerformFreeMemory(chunk); + } +} + +void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() { + std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_)); + // Move constructed, so the permanent list should be empty. + DCHECK(delayed_regular_chunks_.empty()); + for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) { + AddMemoryChunkSafe<kRegular>(*it); + } +} + +bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) { + MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector(); + // We cannot free memory chunks in new space while the sweeper is running + // since a sweeper thread might be stuck right before trying to lock the + // corresponding page. + return !chunk->InNewSpace() || (mc == nullptr) || + mc->sweeper().IsSweepingCompleted(); } bool MemoryAllocator::CommitMemory(Address base, size_t size, @@ -342,11 +417,9 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, Executability executable) { // TODO(gc) make code_range part of memory allocator? // Code which is part of the code-range does not have its own VirtualMemory. - DCHECK(isolate_->code_range() == NULL || - !isolate_->code_range()->contains( - static_cast<Address>(reservation->address()))); - DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || - !isolate_->code_range()->valid() || + DCHECK(code_range() == NULL || + !code_range()->contains(static_cast<Address>(reservation->address()))); + DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() || reservation->size() <= Page::kPageSize); reservation->Release(); @@ -356,20 +429,18 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, void MemoryAllocator::FreeMemory(Address base, size_t size, Executability executable) { // TODO(gc) make code_range part of memory allocator? 
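Annotation: the new Unmapper decouples page freeing from the mutator. Chunks are queued under a lock, a background task drains the queue, and a semaphore counts outstanding tasks so TearDown can wait for them. The following self-contained sketch shows that producer/consumer pattern with std::thread and a condition variable standing in for V8's platform task and semaphore; only the method names are taken from the diff, everything else is an assumption for illustration.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct Chunk { int id; };  // stand-in for MemoryChunk

class Unmapper {
 public:
  void AddChunkSafe(Chunk c) {
    std::lock_guard<std::mutex> guard(mutex_);
    queue_.push(c);
  }

  // Producer side: hand the queued chunks to a background "task".
  void FreeQueuedChunks() {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      ++pending_tasks_;
    }
    workers_.emplace_back([this] {
      PerformFreeMemoryOnQueuedChunks();
      std::lock_guard<std::mutex> guard(mutex_);
      --pending_tasks_;
      done_.notify_all();  // plays the role of the semaphore Signal()
    });
  }

  // Consumer side: drain whatever is queued right now.
  void PerformFreeMemoryOnQueuedChunks() {
    for (;;) {
      Chunk c;
      {
        std::lock_guard<std::mutex> guard(mutex_);
        if (queue_.empty()) return;
        c = queue_.front();
        queue_.pop();
      }
      std::printf("freeing chunk %d\n", c.id);  // the real code unmaps memory here
    }
  }

  // Equivalent of WaitUntilCompleted(): block until no task is in flight.
  void WaitUntilCompleted() {
    std::unique_lock<std::mutex> lock(mutex_);
    done_.wait(lock, [this] { return pending_tasks_ == 0; });
    lock.unlock();
    for (auto& t : workers_) t.join();
    workers_.clear();
  }

 private:
  std::mutex mutex_;
  std::condition_variable done_;
  std::queue<Chunk> queue_;
  int pending_tasks_ = 0;
  std::vector<std::thread> workers_;
};

int main() {
  Unmapper unmapper;
  for (int i = 0; i < 4; i++) unmapper.AddChunkSafe({i});
  unmapper.FreeQueuedChunks();
  unmapper.WaitUntilCompleted();
  return 0;
}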
- if (isolate_->code_range() != NULL && - isolate_->code_range()->contains(static_cast<Address>(base))) { + if (code_range() != NULL && + code_range()->contains(static_cast<Address>(base))) { DCHECK(executable == EXECUTABLE); - isolate_->code_range()->FreeRawMemory(base, size); + code_range()->FreeRawMemory(base, size); } else { - DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || - !isolate_->code_range()->valid()); + DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid()); bool result = base::VirtualMemory::ReleaseRegion(base, size); USE(result); DCHECK(result); } } - Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, base::VirtualMemory* controller) { base::VirtualMemory reservation(size, alignment); @@ -382,7 +453,6 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, return base; } - Address MemoryAllocator::AllocateAlignedMemory( size_t reserve_size, size_t commit_size, size_t alignment, Executability executable, base::VirtualMemory* controller) { @@ -415,20 +485,12 @@ Address MemoryAllocator::AllocateAlignedMemory( return base; } - -void Page::InitializeAsAnchor(PagedSpace* owner) { - set_owner(owner); - set_prev_page(this); - set_next_page(this); -} - -void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { - set_owner(semi_space); +void Page::InitializeAsAnchor(Space* space) { + set_owner(space); set_next_chunk(this); set_prev_chunk(this); - // Flags marks this invalid page as not being in new-space. - // All real new-space pages will be in new-space. SetFlags(0, ~0); + SetFlag(ANCHOR); } MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, @@ -448,19 +510,22 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, chunk->InitializeReservedMemory(); chunk->old_to_new_slots_ = nullptr; chunk->old_to_old_slots_ = nullptr; + chunk->typed_old_to_new_slots_ = nullptr; chunk->typed_old_to_old_slots_ = nullptr; chunk->skip_list_ = nullptr; chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; chunk->progress_bar_ = 0; chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); chunk->concurrent_sweeping_state().SetValue(kSweepingDone); - chunk->mutex_ = nullptr; + chunk->mutex_ = new base::Mutex(); chunk->available_in_free_list_ = 0; chunk->wasted_memory_ = 0; chunk->ResetLiveBytes(); - Bitmap::Clear(chunk); + chunk->ClearLiveness(); chunk->set_next_chunk(nullptr); chunk->set_prev_chunk(nullptr); + chunk->local_tracker_ = nullptr; + chunk->black_area_end_marker_map_ = nullptr; DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); @@ -496,19 +561,18 @@ bool MemoryChunk::CommitArea(size_t requested) { if (reservation_.IsReserved()) { Executability executable = IsFlagSet(IS_EXECUTABLE) ? 
EXECUTABLE : NOT_EXECUTABLE; - if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length, - executable)) { + if (!heap()->memory_allocator()->CommitMemory(start, length, + executable)) { return false; } } else { - CodeRange* code_range = heap_->isolate()->code_range(); - DCHECK(code_range != NULL && code_range->valid() && - IsFlagSet(IS_EXECUTABLE)); + CodeRange* code_range = heap_->memory_allocator()->code_range(); + DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); if (!code_range->CommitRawMemory(start, length)) return false; } if (Heap::ShouldZapGarbage()) { - heap_->isolate()->memory_allocator()->ZapBlock(start, length); + heap_->memory_allocator()->ZapBlock(start, length); } } else if (commit_size < committed_size) { DCHECK(commit_size > 0); @@ -518,9 +582,8 @@ bool MemoryChunk::CommitArea(size_t requested) { if (reservation_.IsReserved()) { if (!reservation_.Uncommit(start, length)) return false; } else { - CodeRange* code_range = heap_->isolate()->code_range(); - DCHECK(code_range != NULL && code_range->valid() && - IsFlagSet(IS_EXECUTABLE)); + CodeRange* code_range = heap_->memory_allocator()->code_range(); + DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); if (!code_range->UncommitRawMemory(start, length)) return false; } } @@ -529,6 +592,11 @@ bool MemoryChunk::CommitArea(size_t requested) { return true; } +size_t MemoryChunk::CommittedPhysicalMemory() { + if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE) + return size(); + return high_water_mark_.Value(); +} void MemoryChunk::InsertAfter(MemoryChunk* other) { MemoryChunk* other_next = other->next_chunk(); @@ -614,13 +682,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, #ifdef V8_TARGET_ARCH_MIPS64 // Use code range only for large object space on mips64 to keep address // range within 256-MB memory region. - if (isolate_->code_range() != NULL && isolate_->code_range()->valid() && - reserve_area_size > CodePageAreaSize()) { + if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) { #else - if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { + if (code_range()->valid()) { #endif - base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, - &chunk_size); + base = + code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size); DCHECK( IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); if (base == NULL) return NULL; @@ -669,9 +736,22 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, static_cast<int>(chunk_size)); LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); - if (owner != NULL) { - ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); - PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); + + // We cannot use the last chunk in the address space because we would + // overflow when comparing top and limit if this chunk is used for a + // linear allocation area. 
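Annotation: the new guard in AllocateChunk rejects a chunk whose end is address 0. With unsigned pointer arithmetic, base + chunk_size wraps around to zero, so a bump-pointer allocation area placed in that chunk would make top/limit comparisons meaningless. A tiny demonstration of the wraparound, using plain uintptr_t values and no real allocation, is below.

#include <cstdint>
#include <cstdio>

int main() {
  // Suppose the OS hands back the very last chunk of the address space.
  const uintptr_t chunk_size = 0x1000;
  const uintptr_t base = UINTPTR_MAX - chunk_size + 1;  // chunk ends at address 0

  uintptr_t end = base + chunk_size;  // unsigned arithmetic wraps to 0
  std::printf("base = %#zx, base + size = %#zx\n", (size_t)base, (size_t)end);

  // A linear allocation area [top, limit) placed in this chunk would have
  // limit == 0, so "top <= limit" style checks no longer hold.
  uintptr_t top = base;
  uintptr_t limit = end;
  std::printf("top <= limit? %s\n", top <= limit ? "yes" : "no");  // prints "no"
  return 0;
}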
+ if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) { + CHECK(!last_chunk_.IsReserved()); + last_chunk_.TakeControl(&reservation); + UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()), + last_chunk_.size()); + size_.Increment(-static_cast<intptr_t>(chunk_size)); + if (executable == EXECUTABLE) { + size_executable_.Increment(-static_cast<intptr_t>(chunk_size)); + } + CHECK(last_chunk_.IsReserved()); + return AllocateChunk(reserve_area_size, commit_area_size, executable, + owner); } return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, @@ -684,28 +764,30 @@ void Page::ResetFreeListStatistics() { available_in_free_list_ = 0; } -LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, - Space* owner, - Executability executable) { - MemoryChunk* chunk = - AllocateChunk(object_size, object_size, executable, owner); - if (chunk == NULL) return NULL; - if (executable && chunk->size() > LargePage::kMaxCodePageSize) { - STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset); - FATAL("Code page is too large."); - } - return LargePage::Initialize(isolate_->heap(), chunk); -} +void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, + Address start_free) { + // We do not allow partial shrink for code. + DCHECK(chunk->executable() == NOT_EXECUTABLE); + + intptr_t size; + base::VirtualMemory* reservation = chunk->reserved_memory(); + DCHECK(reservation->IsReserved()); + size = static_cast<intptr_t>(reservation->size()); + size_t to_free_size = size - (start_free - chunk->address()); + + DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size)); + size_.Increment(-static_cast<intptr_t>(to_free_size)); + isolate_->counters()->memory_allocated()->Decrement( + static_cast<int>(to_free_size)); + chunk->set_size(size - to_free_size); + + reservation->ReleasePartial(start_free); +} void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); - if (chunk->owner() != NULL) { - ObjectSpace space = - static_cast<ObjectSpace>(1 << chunk->owner()->identity()); - PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); - } isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); @@ -735,36 +817,52 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { chunk->ReleaseAllocatedMemory(); base::VirtualMemory* reservation = chunk->reserved_memory(); - if (reservation->IsReserved()) { - FreeMemory(reservation, chunk->executable()); + if (chunk->IsFlagSet(MemoryChunk::POOLED)) { + UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize); } else { - FreeMemory(chunk->address(), chunk->size(), chunk->executable()); + if (reservation->IsReserved()) { + FreeMemory(reservation, chunk->executable()); + } else { + FreeMemory(chunk->address(), chunk->size(), chunk->executable()); + } } } -template <MemoryAllocator::AllocationMode mode> +template <MemoryAllocator::FreeMode mode> void MemoryAllocator::Free(MemoryChunk* chunk) { - if (mode == kRegular) { - PreFreeMemory(chunk); - PerformFreeMemory(chunk); - } else { - DCHECK_EQ(mode, kPooled); - FreePooled(chunk); + switch (mode) { + case kFull: + PreFreeMemory(chunk); + PerformFreeMemory(chunk); + break; + case kPooledAndQueue: + DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize)); + DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE); + chunk->SetFlag(MemoryChunk::POOLED); + // Fall through to kPreFreeAndQueue. 
+ case kPreFreeAndQueue: + PreFreeMemory(chunk); + // The chunks added to this queue will be freed by a concurrent thread. + unmapper()->AddMemoryChunkSafe(chunk); + break; + default: + UNREACHABLE(); } } -template void MemoryAllocator::Free<MemoryAllocator::kRegular>( +template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk); + +template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>( MemoryChunk* chunk); -template void MemoryAllocator::Free<MemoryAllocator::kPooled>( +template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>( MemoryChunk* chunk); -template <typename PageType, MemoryAllocator::AllocationMode mode, - typename SpaceType> -PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner, - Executability executable) { +template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType> +Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner, + Executability executable) { MemoryChunk* chunk = nullptr; - if (mode == kPooled) { + if (alloc_mode == kPooled) { DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory)); DCHECK_EQ(executable, NOT_EXECUTABLE); chunk = AllocatePagePooled(owner); @@ -773,22 +871,32 @@ PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner, chunk = AllocateChunk(size, size, executable, owner); } if (chunk == nullptr) return nullptr; - return PageType::Initialize(isolate_->heap(), chunk, executable, owner); + return Page::Initialize(isolate_->heap(), chunk, executable, owner); } -template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular, - PagedSpace>(intptr_t, PagedSpace*, - Executability); +template Page* +MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>( + intptr_t size, PagedSpace* owner, Executability executable); +template Page* +MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>( + intptr_t size, SemiSpace* owner, Executability executable); +template Page* +MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>( + intptr_t size, SemiSpace* owner, Executability executable); -template NewSpacePage* MemoryAllocator::AllocatePage< - NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*, - Executability); +LargePage* MemoryAllocator::AllocateLargePage(intptr_t size, + LargeObjectSpace* owner, + Executability executable) { + MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); + if (chunk == nullptr) return nullptr; + return LargePage::Initialize(isolate_->heap(), chunk, executable, owner); +} template <typename SpaceType> MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) { - if (chunk_pool_.is_empty()) return nullptr; + MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe(); + if (chunk == nullptr) return nullptr; const int size = MemoryChunk::kPageSize; - MemoryChunk* chunk = chunk_pool_.RemoveLast(); const Address start = reinterpret_cast<Address>(chunk); const Address area_start = start + MemoryChunk::kObjectStartOffset; const Address area_end = start + size; @@ -802,18 +910,6 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) { return chunk; } -void MemoryAllocator::FreePooled(MemoryChunk* chunk) { - DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize)); - DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE); - chunk_pool_.Add(chunk); - intptr_t chunk_size = static_cast<intptr_t>(chunk->size()); - if (chunk->executable() == EXECUTABLE) { - size_executable_.Increment(-chunk_size); - } - 
size_.Increment(-chunk_size); - UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize); -} - bool MemoryAllocator::CommitBlock(Address start, size_t size, Executability executable) { if (!CommitMemory(start, size, executable)) return false; @@ -840,60 +936,11 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) { } } - -void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, - AllocationAction action, - size_t size) { - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - MemoryAllocationCallbackRegistration registration = - memory_allocation_callbacks_[i]; - if ((registration.space & space) == space && - (registration.action & action) == action) - registration.callback(space, action, static_cast<int>(size)); - } -} - - -bool MemoryAllocator::MemoryAllocationCallbackRegistered( - MemoryAllocationCallback callback) { - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - if (memory_allocation_callbacks_[i].callback == callback) return true; - } - return false; -} - - -void MemoryAllocator::AddMemoryAllocationCallback( - MemoryAllocationCallback callback, ObjectSpace space, - AllocationAction action) { - DCHECK(callback != NULL); - MemoryAllocationCallbackRegistration registration(callback, space, action); - DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); - return memory_allocation_callbacks_.Add(registration); -} - - -void MemoryAllocator::RemoveMemoryAllocationCallback( - MemoryAllocationCallback callback) { - DCHECK(callback != NULL); - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - if (memory_allocation_callbacks_[i].callback == callback) { - memory_allocation_callbacks_.Remove(i); - return; - } - } - UNREACHABLE(); -} - - #ifdef DEBUG void MemoryAllocator::ReportStatistics() { intptr_t size = Size(); float pct = static_cast<float>(capacity_ - size) / capacity_; - PrintF(" capacity: %" V8_PTR_PREFIX - "d" - ", used: %" V8_PTR_PREFIX - "d" + PrintF(" capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR ", available: %%%d\n\n", capacity_, size, static_cast<int>(pct * 100)); } @@ -959,12 +1006,19 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, // MemoryChunk implementation void MemoryChunk::ReleaseAllocatedMemory() { - delete skip_list_; - skip_list_ = nullptr; - delete mutex_; - mutex_ = nullptr; - ReleaseOldToNewSlots(); - ReleaseOldToOldSlots(); + if (skip_list_ != nullptr) { + delete skip_list_; + skip_list_ = nullptr; + } + if (mutex_ != nullptr) { + delete mutex_; + mutex_ = nullptr; + } + if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots(); + if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots(); + if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots(); + if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots(); + if (local_tracker_ != nullptr) ReleaseLocalTracker(); } static SlotSet* AllocateSlotSet(size_t size, Address page_start) { @@ -997,6 +1051,16 @@ void MemoryChunk::ReleaseOldToOldSlots() { old_to_old_slots_ = nullptr; } +void MemoryChunk::AllocateTypedOldToNewSlots() { + DCHECK(nullptr == typed_old_to_new_slots_); + typed_old_to_new_slots_ = new TypedSlotSet(address()); +} + +void MemoryChunk::ReleaseTypedOldToNewSlots() { + delete typed_old_to_new_slots_; + typed_old_to_new_slots_ = nullptr; +} + void MemoryChunk::AllocateTypedOldToOldSlots() { DCHECK(nullptr == typed_old_to_old_slots_); typed_old_to_old_slots_ = new TypedSlotSet(address()); @@ -1006,6 +1070,23 @@ void MemoryChunk::ReleaseTypedOldToOldSlots() { 
delete typed_old_to_old_slots_; typed_old_to_old_slots_ = nullptr; } + +void MemoryChunk::AllocateLocalTracker() { + DCHECK_NULL(local_tracker_); + local_tracker_ = new LocalArrayBufferTracker(heap()); +} + +void MemoryChunk::ReleaseLocalTracker() { + DCHECK_NOT_NULL(local_tracker_); + delete local_tracker_; + local_tracker_ = nullptr; +} + +void MemoryChunk::ClearLiveness() { + markbits()->Clear(); + ResetLiveBytes(); +} + // ----------------------------------------------------------------------------- // PagedSpace implementation @@ -1029,13 +1110,11 @@ void Space::AllocationStep(Address soon_object, int size) { PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, Executability executable) - : Space(heap, space, executable), free_list_(this) { + : Space(heap, space, executable), anchor_(this), free_list_(this) { area_size_ = MemoryAllocator::PageAreaSize(space); accounting_stats_.Clear(); allocation_info_.Reset(nullptr, nullptr); - - anchor_.InitializeAsAnchor(this); } @@ -1046,9 +1125,10 @@ bool PagedSpace::HasBeenSetUp() { return true; } void PagedSpace::TearDown() { - PageIterator iterator(this); - while (iterator.has_next()) { - heap()->isolate()->memory_allocator()->Free(iterator.next()); + for (auto it = begin(); it != end();) { + Page* page = *(it++); // Will be erased. + ArrayBufferTracker::FreeAll(page); + heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page); } anchor_.set_next_page(&anchor_); anchor_.set_prev_page(&anchor_); @@ -1063,17 +1143,14 @@ void PagedSpace::RefillFreeList() { return; } MarkCompactCollector* collector = heap()->mark_compact_collector(); - List<Page*>* swept_pages = collector->swept_pages(identity()); intptr_t added = 0; { - base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex()); - for (int i = swept_pages->length() - 1; i >= 0; --i) { - Page* p = (*swept_pages)[i]; + Page* p = nullptr; + while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) { // Only during compaction pages can actually change ownership. This is // safe because there exists no other competing action on the page links // during compaction. if (is_local() && (p->owner() != this)) { - if (added > kCompactionMemoryWanted) break; base::LockGuard<base::Mutex> guard( reinterpret_cast<PagedSpace*>(p->owner())->mutex()); p->Unlink(); @@ -1082,7 +1159,7 @@ void PagedSpace::RefillFreeList() { } added += RelinkFreeListCategories(p); added += p->wasted_memory(); - swept_pages->Remove(i); + if (is_local() && (added > kCompactionMemoryWanted)) break; } } accounting_stats_.IncreaseCapacity(added); @@ -1107,10 +1184,8 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { AccountCommitted(other->CommittedMemory()); // Move over pages. - PageIterator it(other); - Page* p = nullptr; - while (it.has_next()) { - p = it.next(); + for (auto it = other->begin(); it != other->end();) { + Page* p = *(it++); // Relinking requires the category to be unlinked. 
other->UnlinkFreeListCategories(p); @@ -1127,18 +1202,16 @@ size_t PagedSpace::CommittedPhysicalMemory() { if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); size_t size = 0; - PageIterator it(this); - while (it.has_next()) { - size += it.next()->CommittedPhysicalMemory(); + for (Page* page : *this) { + size += page->CommittedPhysicalMemory(); } return size; } bool PagedSpace::ContainsSlow(Address addr) { Page* p = Page::FromAddress(addr); - PageIterator iterator(this); - while (iterator.has_next()) { - if (iterator.next() == p) return true; + for (Page* page : *this) { + if (page == p) return true; } return false; } @@ -1162,48 +1235,22 @@ Object* PagedSpace::FindObject(Address addr) { return Smi::FromInt(0); } - -bool PagedSpace::CanExpand(size_t size) { - DCHECK(heap()->mark_compact_collector()->is_compacting() || - Capacity() <= heap()->MaxOldGenerationSize()); - - // Are we going to exceed capacity for this space? At this point we can be - // way over the maximum size because of AlwaysAllocate scopes and large - // objects. - if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false; - - return true; -} - - bool PagedSpace::Expand() { - intptr_t size = AreaSize(); + int size = AreaSize(); if (snapshotable() && !HasPages()) { size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); } - if (!CanExpand(size)) return false; + if (!heap()->CanExpandOldGeneration(size)) return false; - Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>( - size, this, executable()); - if (p == NULL) return false; + Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); + if (p == nullptr) return false; AccountCommitted(static_cast<intptr_t>(p->size())); // Pages created during bootstrapping may contain immortal immovable objects. if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); - // When incremental marking was activated, old space pages are allocated - // black. 
- if (heap()->incremental_marking()->black_allocation() && - identity() == OLD_SPACE) { - Bitmap::SetAllBits(p); - p->SetFlag(Page::BLACK_PAGE); - if (FLAG_trace_incremental_marking) { - PrintIsolate(heap()->isolate(), "Added black page %p\n", p); - } - } - DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); p->InsertAfter(anchor_.prev_page()); @@ -1213,27 +1260,78 @@ bool PagedSpace::Expand() { int PagedSpace::CountTotalPages() { - PageIterator it(this); int count = 0; - while (it.has_next()) { - it.next(); + for (Page* page : *this) { count++; + USE(page); } return count; } void PagedSpace::ResetFreeListStatistics() { - PageIterator page_iterator(this); - while (page_iterator.has_next()) { - Page* page = page_iterator.next(); + for (Page* page : *this) { page->ResetFreeListStatistics(); } } +void PagedSpace::SetAllocationInfo(Address top, Address limit) { + SetTopAndLimit(top, limit); + if (top != nullptr && top != limit && + heap()->incremental_marking()->black_allocation()) { + Page* page = Page::FromAllocationAreaAddress(top); + page->markbits()->SetRange(page->AddressToMarkbitIndex(top), + page->AddressToMarkbitIndex(limit)); + page->IncrementLiveBytes(static_cast<int>(limit - top)); + } +} + +void PagedSpace::MarkAllocationInfoBlack() { + DCHECK(heap()->incremental_marking()->black_allocation()); + Address current_top = top(); + Address current_limit = limit(); + if (current_top != nullptr && current_top != current_limit) { + Page* page = Page::FromAllocationAreaAddress(current_top); + page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top), + page->AddressToMarkbitIndex(current_limit)); + page->IncrementLiveBytes(static_cast<int>(current_limit - current_top)); + } +} + +// Empty space allocation info, returning unused area to free list. +void PagedSpace::EmptyAllocationInfo() { + // Mark the old linear allocation area with a free space map so it can be + // skipped when scanning the heap. + Address current_top = top(); + Address current_limit = limit(); + if (current_top == nullptr) { + DCHECK(current_limit == nullptr); + return; + } + + if (heap()->incremental_marking()->black_allocation()) { + Page* page = Page::FromAllocationAreaAddress(current_top); + // We have to remember the end of the current black allocation area if + // something was allocated in the current bump pointer range. + if (allocation_info_.original_top() != current_top) { + Address end_black_area = current_top - kPointerSize; + page->AddBlackAreaEndMarker(end_black_area); + } + + // Clear the bits in the unused black area. 
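Annotation: with the BLACK_PAGE flag removed, black allocation is tracked per allocation area. SetAllocationInfo / MarkAllocationInfoBlack set the mark bits covering [top, limit) and bump live bytes, and EmptyAllocationInfo clears the unused tail again. The reduced sketch below sets and clears a bit range in a word-based bitmap; mapping addresses to bit indices by a fixed pointer size is an assumption made for the example, not the real Bitmap layout.

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uintptr_t kPointerSize = 8;  // one mark bit per pointer-sized word (assumption)

class MarkBitmap {
 public:
  explicit MarkBitmap(size_t bits) : words_((bits + 31) / 32, 0u) {}
  void SetRange(size_t start, size_t end) {
    for (size_t i = start; i < end; i++) words_[i / 32] |= 1u << (i % 32);
  }
  void ClearRange(size_t start, size_t end) {
    for (size_t i = start; i < end; i++) words_[i / 32] &= ~(1u << (i % 32));
  }
  bool Get(size_t i) const { return (words_[i / 32] >> (i % 32)) & 1u; }

 private:
  std::vector<uint32_t> words_;
};

size_t AddressToMarkbitIndex(uintptr_t page_start, uintptr_t addr) {
  return (addr - page_start) / kPointerSize;
}

int main() {
  const uintptr_t page_start = 0x1000;
  MarkBitmap bitmap(512);
  int live_bytes = 0;

  // "SetAllocationInfo(top, limit)" under black allocation: mark the whole
  // linear area black up front and account its size as live.
  uintptr_t top = 0x1040, limit = 0x1100;
  bitmap.SetRange(AddressToMarkbitIndex(page_start, top),
                  AddressToMarkbitIndex(page_start, limit));
  live_bytes += static_cast<int>(limit - top);

  // "EmptyAllocationInfo": only part of the area was used; clear the rest.
  uintptr_t new_top = 0x10c0;  // allocation stopped here
  bitmap.ClearRange(AddressToMarkbitIndex(page_start, new_top),
                    AddressToMarkbitIndex(page_start, limit));
  live_bytes -= static_cast<int>(limit - new_top);

  std::printf("live bytes after trimming: %d\n", live_bytes);
  std::printf("bit at top set: %d, bit at new_top set: %d\n",
              (int)bitmap.Get(AddressToMarkbitIndex(page_start, top)),
              (int)bitmap.Get(AddressToMarkbitIndex(page_start, new_top)));
  return 0;
}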
+ if (current_top != current_limit) { + page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top), + page->AddressToMarkbitIndex(current_limit)); + page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top)); + } + } -void PagedSpace::IncreaseCapacity(int size) { - accounting_stats_.ExpandSpace(size); + SetTopAndLimit(NULL, NULL); + Free(current_top, static_cast<int>(current_limit - current_top)); +} + +void PagedSpace::IncreaseCapacity(size_t bytes) { + accounting_stats_.ExpandSpace(bytes); } void PagedSpace::ReleasePage(Page* page) { @@ -1244,7 +1342,9 @@ void PagedSpace::ReleasePage(Page* page) { free_list_.EvictFreeListItems(page); DCHECK(!free_list_.ContainsPageFreeListItems(page)); - if (Page::FromAllocationTop(allocation_info_.top()) == page) { + page->ReleaseBlackAreaEndMarkerMap(); + + if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { allocation_info_.Reset(nullptr, nullptr); } @@ -1255,7 +1355,7 @@ void PagedSpace::ReleasePage(Page* page) { } AccountUncommitted(static_cast<intptr_t>(page->size())); - heap()->QueueMemoryChunkForFree(page); + heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); DCHECK(Capacity() > 0); accounting_stats_.ShrinkSpace(AreaSize()); @@ -1269,11 +1369,9 @@ void PagedSpace::Print() {} void PagedSpace::Verify(ObjectVisitor* visitor) { bool allocation_pointer_found_in_space = (allocation_info_.top() == allocation_info_.limit()); - PageIterator page_iterator(this); - while (page_iterator.has_next()) { - Page* page = page_iterator.next(); + for (Page* page : *this) { CHECK(page->owner() == this); - if (page == Page::FromAllocationTop(allocation_info_.top())) { + if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) { allocation_pointer_found_in_space = true; } CHECK(page->SweepingDone()); @@ -1299,8 +1397,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) { // All the interior pointers should be contained in the heap. int size = object->Size(); object->IterateBody(map->instance_type(), size, visitor); - if (!page->IsFlagSet(Page::BLACK_PAGE) && - Marking::IsBlack(Marking::MarkBitFrom(object))) { + if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) { black_size += size; } @@ -1358,7 +1455,6 @@ void NewSpace::TearDown() { from_space_.TearDown(); } - void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } @@ -1404,6 +1500,48 @@ void NewSpace::Shrink() { DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } +bool NewSpace::Rebalance() { + CHECK(heap()->promotion_queue()->is_empty()); + // Order here is important to make use of the page pool. + return to_space_.EnsureCurrentCapacity() && + from_space_.EnsureCurrentCapacity(); +} + +bool SemiSpace::EnsureCurrentCapacity() { + if (is_committed()) { + const int expected_pages = current_capacity_ / Page::kPageSize; + int actual_pages = 0; + Page* current_page = anchor()->next_page(); + while (current_page != anchor()) { + actual_pages++; + current_page = current_page->next_page(); + if (actual_pages > expected_pages) { + Page* to_remove = current_page->prev_page(); + // Make sure we don't overtake the actual top pointer. 
+ CHECK_NE(to_remove, current_page_); + to_remove->Unlink(); + heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>( + to_remove); + } + } + while (actual_pages < expected_pages) { + actual_pages++; + current_page = + heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>( + Page::kAllocatableMemory, this, executable()); + if (current_page == nullptr) return false; + DCHECK_NOT_NULL(current_page); + current_page->InsertAfter(anchor()); + current_page->ClearLiveness(); + current_page->SetFlags(anchor()->prev_page()->GetFlags(), + Page::kCopyAllFlags); + heap()->CreateFillerObjectAt(current_page->area_start(), + current_page->area_size(), + ClearRecordedSlots::kNo); + } + } + return true; +} void LocalAllocationBuffer::Close() { if (IsValid()) { @@ -1460,11 +1598,9 @@ void NewSpace::ResetAllocationInfo() { Address old_top = allocation_info_.top(); to_space_.Reset(); UpdateAllocationInfo(); - pages_used_ = 0; // Clear all mark-bits in the to-space. - NewSpacePageIterator it(&to_space_); - while (it.has_next()) { - Bitmap::Clear(it.next()); + for (Page* p : to_space_) { + p->ClearLiveness(); } InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0); } @@ -1492,21 +1628,20 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { bool NewSpace::AddFreshPage() { Address top = allocation_info_.top(); - DCHECK(!NewSpacePage::IsAtStart(top)); + DCHECK(!Page::IsAtObjectStart(top)); if (!to_space_.AdvancePage()) { // No more pages left to advance. return false; } // Clear remainder of current page. - Address limit = NewSpacePage::FromLimit(top)->area_end(); + Address limit = Page::FromAllocationAreaAddress(top)->area_end(); if (heap()->gc_state() == Heap::SCAVENGE) { heap()->promotion_queue()->SetNewLimit(limit); } int remaining_in_page = static_cast<int>(limit - top); heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo); - pages_used_++; UpdateAllocationInfo(); return true; @@ -1526,7 +1661,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes, int filler_size = Heap::GetFillToAlign(old_top, alignment); int aligned_size_in_bytes = size_in_bytes + filler_size; - if (old_top + aligned_size_in_bytes >= high) { + if (old_top + aligned_size_in_bytes > high) { // Not enough room in the page, try to allocate a new one. if (!AddFreshPage()) { return false; @@ -1537,10 +1672,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes, old_top = allocation_info_.top(); high = to_space_.page_high(); filler_size = Heap::GetFillToAlign(old_top, alignment); - aligned_size_in_bytes = size_in_bytes + filler_size; } - DCHECK(old_top + aligned_size_in_bytes < high); + DCHECK(old_top + aligned_size_in_bytes <= high); if (allocation_info_.limit() < high) { // Either the limit has been lowered because linear allocation was disabled @@ -1626,9 +1760,9 @@ void NewSpace::Verify() { CHECK_EQ(current, to_space_.space_start()); while (current != top()) { - if (!NewSpacePage::IsAtEnd(current)) { + if (!Page::IsAlignedToPageSize(current)) { // The allocation pointer should not be in the middle of an object. - CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || + CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) || current < top()); HeapObject* object = HeapObject::FromAddress(current); @@ -1654,7 +1788,7 @@ void NewSpace::Verify() { current += size; } else { // At end of page, switch to next page. 
- NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); + Page* page = Page::FromAllocationAreaAddress(current)->next_page(); // Next page should be valid. CHECK(!page->is_anchor()); current = page->area_start(); @@ -1683,22 +1817,24 @@ void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) { void SemiSpace::TearDown() { // Properly uncommit memory to keep the allocator counters in sync. - if (is_committed()) Uncommit(); + if (is_committed()) { + for (Page* p : *this) { + ArrayBufferTracker::FreeAll(p); + } + Uncommit(); + } current_capacity_ = maximum_capacity_ = 0; } bool SemiSpace::Commit() { DCHECK(!is_committed()); - NewSpacePage* current = anchor(); + Page* current = anchor(); const int num_pages = current_capacity_ / Page::kPageSize; for (int pages_added = 0; pages_added < num_pages; pages_added++) { - NewSpacePage* new_page = - heap() - ->isolate() - ->memory_allocator() - ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>( - NewSpacePage::kAllocatableMemory, this, executable()); + Page* new_page = + heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>( + Page::kAllocatableMemory, this, executable()); if (new_page == nullptr) { RewindPages(current, pages_added); return false; @@ -1718,15 +1854,15 @@ bool SemiSpace::Commit() { bool SemiSpace::Uncommit() { DCHECK(is_committed()); - NewSpacePageIterator it(this); - while (it.has_next()) { - heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>( - it.next()); + for (auto it = begin(); it != end();) { + Page* p = *(it++); + heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p); } anchor()->set_next_page(anchor()); anchor()->set_prev_page(anchor()); AccountUncommitted(current_capacity_); committed_ = false; + heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); return true; } @@ -1734,9 +1870,8 @@ bool SemiSpace::Uncommit() { size_t SemiSpace::CommittedPhysicalMemory() { if (!is_committed()) return 0; size_t size = 0; - NewSpacePageIterator it(this); - while (it.has_next()) { - size += it.next()->CommittedPhysicalMemory(); + for (Page* p : *this) { + size += p->CommittedPhysicalMemory(); } return size; } @@ -1746,30 +1881,26 @@ bool SemiSpace::GrowTo(int new_capacity) { if (!is_committed()) { if (!Commit()) return false; } - DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0); + DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0); DCHECK_LE(new_capacity, maximum_capacity_); DCHECK_GT(new_capacity, current_capacity_); const int delta = new_capacity - current_capacity_; DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); - int delta_pages = delta / NewSpacePage::kPageSize; - NewSpacePage* last_page = anchor()->prev_page(); + const int delta_pages = delta / Page::kPageSize; + Page* last_page = anchor()->prev_page(); DCHECK_NE(last_page, anchor()); for (int pages_added = 0; pages_added < delta_pages; pages_added++) { - NewSpacePage* new_page = - heap() - ->isolate() - ->memory_allocator() - ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>( - NewSpacePage::kAllocatableMemory, this, executable()); + Page* new_page = + heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>( + Page::kAllocatableMemory, this, executable()); if (new_page == nullptr) { RewindPages(last_page, pages_added); return false; } new_page->InsertAfter(last_page); - Bitmap::Clear(new_page); + new_page->ClearLiveness(); // Duplicate the flags that was set on the old page. 
- new_page->SetFlags(last_page->GetFlags(), - NewSpacePage::kCopyOnFlipFlagsMask); + new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask); last_page = new_page; } AccountCommitted(static_cast<intptr_t>(delta)); @@ -1777,9 +1908,9 @@ bool SemiSpace::GrowTo(int new_capacity) { return true; } -void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) { - NewSpacePage* new_last_page = nullptr; - NewSpacePage* last_page = start; +void SemiSpace::RewindPages(Page* start, int num_pages) { + Page* new_last_page = nullptr; + Page* last_page = start; while (num_pages > 0) { DCHECK_NE(last_page, anchor()); new_last_page = last_page->prev_page(); @@ -1791,25 +1922,26 @@ void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) { } bool SemiSpace::ShrinkTo(int new_capacity) { - DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0); + DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0); DCHECK_GE(new_capacity, minimum_capacity_); DCHECK_LT(new_capacity, current_capacity_); if (is_committed()) { const int delta = current_capacity_ - new_capacity; DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); - int delta_pages = delta / NewSpacePage::kPageSize; - NewSpacePage* new_last_page; - NewSpacePage* last_page; + int delta_pages = delta / Page::kPageSize; + Page* new_last_page; + Page* last_page; while (delta_pages > 0) { last_page = anchor()->prev_page(); new_last_page = last_page->prev_page(); new_last_page->set_next_page(anchor()); anchor()->set_prev_page(new_last_page); - heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>( + heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>( last_page); delta_pages--; } AccountUncommitted(static_cast<intptr_t>(delta)); + heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); } current_capacity_ = new_capacity; return true; @@ -1817,13 +1949,10 @@ bool SemiSpace::ShrinkTo(int new_capacity) { void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) { anchor_.set_owner(this); - // Fixup back-pointers to anchor. Address of anchor changes when we swap. anchor_.prev_page()->set_next_page(&anchor_); anchor_.next_page()->set_prev_page(&anchor_); - NewSpacePageIterator it(this); - while (it.has_next()) { - NewSpacePage* page = it.next(); + for (Page* page : *this) { page->set_owner(this); page->SetFlags(flags, mask); if (id_ == kToSpace) { @@ -1844,8 +1973,22 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) { void SemiSpace::Reset() { DCHECK_NE(anchor_.next_page(), &anchor_); current_page_ = anchor_.next_page(); + pages_used_ = 0; +} + +void SemiSpace::RemovePage(Page* page) { + if (current_page_ == page) { + current_page_ = page->prev_page(); + } + page->Unlink(); } +void SemiSpace::PrependPage(Page* page) { + page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags); + page->set_owner(this); + page->InsertAfter(anchor()); + pages_used_++; +} void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { // We won't be swapping semispaces without data in them. 
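Annotation: SemiSpace::Swap exchanges the two semispaces by value, which moves the anchor objects themselves; pages still hold back-pointers to the old anchor address and carry IN_FROM_SPACE / IN_TO_SPACE flags, so FixPagesFlags has to rewire the neighbours and re-stamp every page afterwards. The stripped-down sketch below shows that fix-up on a sentinel-anchored list; the flag values and the way flags are re-stamped are placeholders (the real code saves the to-space page flags and reapplies them through kCopyOnFlipFlagsMask).

#include <cstdint>
#include <cstdio>
#include <utility>

constexpr uintptr_t kInToSpace = 1 << 0;
constexpr uintptr_t kInFromSpace = 1 << 1;

struct Page {
  Page* next = this;
  Page* prev = this;
  uintptr_t flags = 0;
  void* owner = nullptr;
};

struct SemiSpace {
  Page anchor;                    // sentinel; swapping the SemiSpace moves it
  uintptr_t identity_flag = 0;    // kInToSpace or kInFromSpace, never swapped

  // After the anchor object moved, its neighbouring pages still point at the
  // other space's anchor address; patch them, then re-stamp owner and flags.
  void FixPagesFlags() {
    anchor.prev->next = &anchor;
    anchor.next->prev = &anchor;
    for (Page* p = anchor.next; p != &anchor; p = p->next) {
      p->owner = this;
      p->flags = identity_flag;
    }
  }
};

void InsertAfter(Page* pos, Page* page) {
  page->next = pos->next;
  page->prev = pos;
  pos->next->prev = page;
  pos->next = page;
}

void Swap(SemiSpace* from, SemiSpace* to) {
  // Exchange the page lists by swapping the anchor sentinels (V8 also swaps
  // capacities and current_page_), then repair both lists.
  std::swap(from->anchor, to->anchor);
  to->FixPagesFlags();
  from->FixPagesFlags();
}

int main() {
  SemiSpace to_space, from_space;
  to_space.identity_flag = kInToSpace;
  from_space.identity_flag = kInFromSpace;

  Page a, b;
  InsertAfter(&to_space.anchor, &a);    // to-space currently owns page a
  InsertAfter(&from_space.anchor, &b);  // from-space currently owns page b
  to_space.FixPagesFlags();
  from_space.FixPagesFlags();

  Swap(&from_space, &to_space);         // flip: a ends up in from-space, b in to-space
  std::printf("page a flags: %lu, page b flags: %lu\n",
              (unsigned long)a.flags, (unsigned long)b.flags);
  return 0;
}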
@@ -1863,18 +2006,17 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { std::swap(from->anchor_, to->anchor_); std::swap(from->current_page_, to->current_page_); - to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask); + to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask); from->FixPagesFlags(0, 0); } void SemiSpace::set_age_mark(Address mark) { - DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this); + DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this); age_mark_ = mark; // Mark all pages up to the one containing mark. - NewSpacePageIterator it(space_start(), mark); - while (it.has_next()) { - it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); + for (Page* p : NewSpacePageRange(space_start(), mark)) { + p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); } } @@ -1886,10 +2028,10 @@ void SemiSpace::Print() {} #ifdef VERIFY_HEAP void SemiSpace::Verify() { bool is_from_space = (id_ == kFromSpace); - NewSpacePage* page = anchor_.next_page(); - CHECK(anchor_.semi_space() == this); + Page* page = anchor_.next_page(); + CHECK(anchor_.owner() == this); while (page != &anchor_) { - CHECK_EQ(page->semi_space(), this); + CHECK_EQ(page->owner(), this); CHECK(page->InNewSpace()); CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE : MemoryChunk::IN_TO_SPACE)); @@ -1917,10 +2059,10 @@ void SemiSpace::Verify() { #ifdef DEBUG void SemiSpace::AssertValidRange(Address start, Address end) { // Addresses belong to same semi-space - NewSpacePage* page = NewSpacePage::FromLimit(start); - NewSpacePage* end_page = NewSpacePage::FromLimit(end); - SemiSpace* space = page->semi_space(); - CHECK_EQ(space, end_page->semi_space()); + Page* page = Page::FromAllocationAreaAddress(start); + Page* end_page = Page::FromAllocationAreaAddress(end); + SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner()); + CHECK_EQ(space, end_page->owner()); // Start address is before end address, either on same page, // or end address is on a later page in the linked list of // semi-space pages. @@ -1950,7 +2092,6 @@ void SemiSpaceIterator::Initialize(Address start, Address end) { limit_ = end; } - #ifdef DEBUG // heap_histograms is shared, always clear it before using it. 
static void ClearHistograms(Isolate* isolate) { @@ -1966,27 +2107,6 @@ static void ClearHistograms(Isolate* isolate) { isolate->js_spill_information()->Clear(); } - -static void ClearCodeKindStatistics(int* code_kind_statistics) { - for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { - code_kind_statistics[i] = 0; - } -} - - -static void ReportCodeKindStatistics(int* code_kind_statistics) { - PrintF("\n Code kind histograms: \n"); - for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { - if (code_kind_statistics[i] > 0) { - PrintF(" %-20s: %10d bytes\n", - Code::Kind2String(static_cast<Code::Kind>(i)), - code_kind_statistics[i]); - } - } - PrintF("\n"); -} - - static int CollectHistogramInfo(HeapObject* obj) { Isolate* isolate = obj->GetIsolate(); InstanceType type = obj->map()->instance_type(); @@ -2087,9 +2207,7 @@ void NewSpace::ReportStatistics() { #ifdef DEBUG if (FLAG_heap_stats) { float pct = static_cast<float>(Available()) / TotalCapacity(); - PrintF(" capacity: %" V8_PTR_PREFIX - "d" - ", available: %" V8_PTR_PREFIX "d, %%%d\n", + PrintF(" capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n", TotalCapacity(), Available(), static_cast<int>(pct * 100)); PrintF("\n Object Histogram:\n"); for (int i = 0; i <= LAST_TYPE; i++) { @@ -2321,6 +2439,9 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type, DCHECK(IsVeryLong() || Available() == SumFreeLists()); return node; } + if (current->is_empty()) { + RemoveCategory(current); + } } return node; } @@ -2373,8 +2494,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { // Mark the old linear allocation area with a free space map so it can be // skipped when scanning the heap. This also puts it back in the free list // if it is big enough. - owner_->Free(owner_->top(), old_linear_size); - owner_->SetTopAndLimit(nullptr, nullptr); + owner_->EmptyAllocationInfo(); owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - old_linear_size); @@ -2382,7 +2502,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { int new_node_size = 0; FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); if (new_node == nullptr) return nullptr; - owner_->AllocationStep(new_node->address(), size_in_bytes); int bytes_left = new_node_size - size_in_bytes; DCHECK(bytes_left >= 0); @@ -2409,7 +2528,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { // Keep the linear allocation area empty if requested to do so, just // return area back to the free list instead. owner_->Free(new_node->address() + size_in_bytes, bytes_left); - DCHECK(owner_->top() == NULL && owner_->limit() == NULL); + owner_->SetAllocationInfo(new_node->address() + size_in_bytes, + new_node->address() + size_in_bytes); } else if (bytes_left > kThreshold && owner_->heap()->incremental_marking()->IsMarkingIncomplete() && FLAG_incremental_marking) { @@ -2419,13 +2539,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { // we want to do another increment until the linear area is used up. owner_->Free(new_node->address() + size_in_bytes + linear_size, new_node_size - size_in_bytes - linear_size); - owner_->SetTopAndLimit(new_node->address() + size_in_bytes, - new_node->address() + size_in_bytes + linear_size); - } else if (bytes_left > 0) { + owner_->SetAllocationInfo( + new_node->address() + size_in_bytes, + new_node->address() + size_in_bytes + linear_size); + } else { + DCHECK(bytes_left >= 0); // Normally we give the rest of the node to the allocator as its new // linear allocation area. 
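Annotation: FreeList::Allocate now funnels every outcome through SetAllocationInfo: the old linear area is returned to the free list via EmptyAllocationInfo, a node big enough for size_in_bytes is taken, and the remainder of that node becomes either free space again or the new [top, limit) bump area. The compact sketch below shows that split with a plain sorted free list standing in for V8's categorized one; the hypothetical FreeNode/Allocation types and the policy of keeping the whole remainder allocatable are simplifications.

#include <cstdio>
#include <list>

struct FreeNode { long address; int size; };

struct Allocation {
  long address = 0;  // start of the object, 0 on failure
  long top = 0;      // new linear allocation area [top, limit)
  long limit = 0;
};

// Take the first node that fits, carve the object from its front, and turn
// the remainder into the new bump-pointer area (simplification: the real
// code may also hand part of the remainder back to the free list).
Allocation Allocate(std::list<FreeNode>* free_list, int size_in_bytes) {
  for (auto it = free_list->begin(); it != free_list->end(); ++it) {
    if (it->size < size_in_bytes) continue;
    FreeNode node = *it;
    free_list->erase(it);
    Allocation result;
    result.address = node.address;
    result.top = node.address + size_in_bytes;
    result.limit = node.address + node.size;  // bytes_left stays allocatable
    return result;
  }
  return Allocation{};  // caller would sweep/expand and retry
}

int main() {
  std::list<FreeNode> free_list = {{0x1000, 96}, {0x2000, 512}};
  Allocation a = Allocate(&free_list, 128);
  std::printf("object at %#lx, new area [%#lx, %#lx)\n", a.address, a.top, a.limit);
  return 0;
}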
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes, - new_node->address() + new_node_size); + owner_->SetAllocationInfo(new_node->address() + size_in_bytes, + new_node->address() + new_node_size); } return new_node; @@ -2495,10 +2617,11 @@ void FreeList::RemoveCategory(FreeListCategory* category) { void FreeList::PrintCategories(FreeListCategoryType type) { FreeListCategoryIterator it(this, type); - PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type); + PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this), + static_cast<void*>(categories_[type]), type); while (it.HasNext()) { FreeListCategory* current = it.Next(); - PrintF("%p -> ", current); + PrintF("%p -> ", static_cast<void*>(current)); } PrintF("null\n"); } @@ -2582,9 +2705,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() { free_list_.RepairLists(heap()); // Each page may have a small free space that is not tracked by a free list. // Update the maps for those free space objects. - PageIterator iterator(this); - while (iterator.has_next()) { - Page* page = iterator.next(); + for (Page* page : *this) { int size = static_cast<int>(page->wasted_memory()); if (size == 0) continue; Address address = page->OffsetToAddress(Page::kPageSize - size); @@ -2596,7 +2717,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() { void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() { if (allocation_info_.top() >= allocation_info_.limit()) return; - if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) { + if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) { // Create filler object to keep page iterable if it was iterable. int remaining = static_cast<int>(allocation_info_.limit() - allocation_info_.top()); @@ -2648,8 +2769,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { if (object != NULL) return object; // If sweeping is still in progress try to sweep pages on the main thread. 
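Annotation: SlowAllocateRaw's fallback order is to refill from already-swept pages, sweep more pages on the main thread through sweeper().ParallelSweepSpace until enough bytes were freed, and only then try to expand the space. The high-level sketch below mirrors that retry loop; the sweeper and space interfaces are reduced to std::function hooks purely for the example, they are not V8's actual APIs.

#include <cstdio>
#include <functional>

// Reduced interfaces for the sketch; in V8 these are methods on PagedSpace,
// FreeList and Sweeper rather than callbacks.
struct SlowPathHooks {
  std::function<bool(int)> try_free_list_allocate;  // free_list_.Allocate
  std::function<void()> refill_free_list;           // RefillFreeList
  std::function<bool()> sweeping_in_progress;       // collector state
  std::function<int(int)> sweep_on_main_thread;     // ParallelSweepSpace
  std::function<bool()> expand;                     // PagedSpace::Expand
};

bool SlowAllocateRaw(const SlowPathHooks& h, int size_in_bytes) {
  if (h.sweeping_in_progress()) {
    // First take whatever pages the concurrent sweeper already finished.
    h.refill_free_list();
    if (h.try_free_list_allocate(size_in_bytes)) return true;

    // Then help out: sweep pages on the main thread until enough was freed.
    int max_freed = h.sweep_on_main_thread(size_in_bytes);
    h.refill_free_list();
    if (max_freed >= size_in_bytes && h.try_free_list_allocate(size_in_bytes)) {
      return true;
    }
  }
  // Last resort: grow the space by a fresh page and retry once more.
  if (h.expand() && h.try_free_list_allocate(size_in_bytes)) return true;
  return false;  // the real code would wait for sweeping to complete and retry
}

int main() {
  int freed_so_far = 0;
  SlowPathHooks hooks;
  hooks.try_free_list_allocate = [&](int size) { return freed_so_far >= size; };
  hooks.refill_free_list = [] {};
  hooks.sweeping_in_progress = [] { return true; };
  hooks.sweep_on_main_thread = [&](int size) { freed_so_far += size; return freed_so_far; };
  hooks.expand = [] { return false; };
  std::printf("allocation %s\n", SlowAllocateRaw(hooks, 256) ? "succeeded" : "failed");
  return 0;
}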
- int max_freed = collector->SweepInParallel(heap()->paged_space(identity()), - size_in_bytes, kMaxPagesToSweep); + int max_freed = collector->sweeper().ParallelSweepSpace( + identity(), size_in_bytes, kMaxPagesToSweep); RefillFreeList(); if (max_freed >= size_in_bytes) { object = free_list_.Allocate(size_in_bytes); @@ -2681,145 +2802,11 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { return SweepAndRetryAllocation(size_in_bytes); } - #ifdef DEBUG -void PagedSpace::ReportCodeStatistics(Isolate* isolate) { - CommentStatistic* comments_statistics = - isolate->paged_space_comments_statistics(); - ReportCodeKindStatistics(isolate->code_kind_statistics()); - PrintF( - "Code comment statistics (\" [ comment-txt : size/ " - "count (average)\"):\n"); - for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { - const CommentStatistic& cs = comments_statistics[i]; - if (cs.size > 0) { - PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, - cs.size / cs.count); - } - } - PrintF("\n"); -} - - -void PagedSpace::ResetCodeStatistics(Isolate* isolate) { - CommentStatistic* comments_statistics = - isolate->paged_space_comments_statistics(); - ClearCodeKindStatistics(isolate->code_kind_statistics()); - for (int i = 0; i < CommentStatistic::kMaxComments; i++) { - comments_statistics[i].Clear(); - } - comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; - comments_statistics[CommentStatistic::kMaxComments].size = 0; - comments_statistics[CommentStatistic::kMaxComments].count = 0; -} - - -// Adds comment to 'comment_statistics' table. Performance OK as long as -// 'kMaxComments' is small -static void EnterComment(Isolate* isolate, const char* comment, int delta) { - CommentStatistic* comments_statistics = - isolate->paged_space_comments_statistics(); - // Do not count empty comments - if (delta <= 0) return; - CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; - // Search for a free or matching entry in 'comments_statistics': 'cs' - // points to result. - for (int i = 0; i < CommentStatistic::kMaxComments; i++) { - if (comments_statistics[i].comment == NULL) { - cs = &comments_statistics[i]; - cs->comment = comment; - break; - } else if (strcmp(comments_statistics[i].comment, comment) == 0) { - cs = &comments_statistics[i]; - break; - } - } - // Update entry for 'comment' - cs->size += delta; - cs->count += 1; -} - - -// Call for each nested comment start (start marked with '[ xxx', end marked -// with ']'. RelocIterator 'it' must point to a comment reloc info. -static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { - DCHECK(!it->done()); - DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT); - const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); - if (tmp[0] != '[') { - // Not a nested comment; skip - return; - } - - // Search for end of nested comment or a new nested comment - const char* const comment_txt = - reinterpret_cast<const char*>(it->rinfo()->data()); - const byte* prev_pc = it->rinfo()->pc(); - int flat_delta = 0; - it->next(); - while (true) { - // All nested comments must be terminated properly, and therefore exit - // from loop. 
-    DCHECK(!it->done());
-    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
-      const char* const txt =
-          reinterpret_cast<const char*>(it->rinfo()->data());
-      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
-      if (txt[0] == ']') break;  // End of nested comment
-      // A new comment
-      CollectCommentStatistics(isolate, it);
-      // Skip code that was covered with previous comment
-      prev_pc = it->rinfo()->pc();
-    }
-    it->next();
-  }
-  EnterComment(isolate, comment_txt, flat_delta);
-}
-
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsAbstractCode()) {
-      AbstractCode* code = AbstractCode::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-    if (obj->IsCode()) {
-      // TODO(mythria): Also enable this for BytecodeArray when it supports
-      // RelocInformation.
-      Code* code = Code::cast(obj);
-      RelocIterator it(code);
-      int delta = 0;
-      const byte* prev_pc = code->instruction_start();
-      while (!it.done()) {
-        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
-          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
-          CollectCommentStatistics(isolate, &it);
-          prev_pc = it.rinfo()->pc();
-        }
-        it.next();
-      }
-
-      DCHECK(code->instruction_start() <= prev_pc &&
-             prev_pc <= code->instruction_end());
-      delta += static_cast<int>(code->instruction_end() - prev_pc);
-      EnterComment(isolate, "NoComment", delta);
-    }
-  }
-}
-
-
 void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX
-         "d"
-         ", waste: %" V8_PTR_PREFIX
-         "d"
-         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+  PrintF("  capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
+         ", available: %" V8PRIdPTR ", %%%d\n",
         Capacity(), Waste(), Available(), pct);
 
   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
@@ -2841,6 +2828,25 @@ void PagedSpace::ReportStatistics() {
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif
 
+Address LargePage::GetAddressToShrink() {
+  HeapObject* object = GetObject();
+  if (executable() == EXECUTABLE) {
+    return 0;
+  }
+  size_t used_size = RoundUp((object->address() - address()) + object->Size(),
+                             base::OS::CommitPageSize());
+  if (used_size < CommittedPhysicalMemory()) {
+    return address() + used_size;
+  }
+  return 0;
+}
+
+void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end());
+  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end());
+  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
+  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
+}
 
 // -----------------------------------------------------------------------------
 // LargeObjectIterator
@@ -2862,15 +2868,13 @@ HeapObject* LargeObjectIterator::Next() {
 
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
-
 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
-      chunk_map_(HashMap::PointersMatch, 1024) {}
-
+      chunk_map_(base::HashMap::PointersMatch, 1024) {}
 
 LargeObjectSpace::~LargeObjectSpace() {}
 
@@ -2890,11 +2894,7 @@ void LargeObjectSpace::TearDown() {
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
-    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, page->size());
-    heap()->isolate()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
 }
@@ -2908,7 +2908,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
@@ -2920,16 +2920,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   page->set_next_page(first_page_);
   first_page_ = page;
 
-  // Register all MemoryChunk::kAlignment-aligned chunks covered by
-  // this large page in the chunk map.
-  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
-  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
-  for (uintptr_t key = base; key <= limit; key++) {
-    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
-        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
-    DCHECK(entry != NULL);
-    entry->value = page;
-  }
+  InsertChunkMapEntries(page);
 
   HeapObject* object = page->GetObject();
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
@@ -2944,19 +2935,20 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   heap()->incremental_marking()->OldSpaceStep(object_size);
   AllocationStep(object->address(), object_size);
+
+  if (heap()->incremental_marking()->black_allocation()) {
+    Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
+    MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
+  }
   return object;
 }
 
 
 size_t LargeObjectSpace::CommittedPhysicalMemory() {
-  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  size_t size = 0;
-  LargePage* current = first_page_;
-  while (current != NULL) {
-    size += current->CommittedPhysicalMemory();
-    current = current->next_page();
-  }
-  return size;
+  // On a platform that provides lazy committing of memory, we over-account
+  // the actually committed memory. There is no easy way right now to support
+  // precise accounting of committed memory in large object space.
+  return CommittedMemory();
 }
 
 
@@ -2972,8 +2964,8 @@ Object* LargeObjectSpace::FindObject(Address a) {
 
 LargePage* LargeObjectSpace::FindPage(Address a) {
   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
-  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key));
+  base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key));
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -2990,7 +2982,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
     DCHECK(Marking::IsBlack(mark_bit));
     Marking::BlackToWhite(mark_bit);
     Page::FromAddress(object->address())->ResetProgressBar();
@@ -2999,15 +2991,51 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   }
 }
 
+void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+  uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
+                    MemoryChunk::kAlignment;
+  for (uintptr_t key = start; key <= limit; key++) {
+    base::HashMap::Entry* entry = chunk_map_.InsertNew(
+        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+    DCHECK(entry != NULL);
+    entry->value = page;
+  }
+}
+
+void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
+  RemoveChunkMapEntries(page, page->address());
+}
+
+void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
+                                             Address free_start) {
+  uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
+                            MemoryChunk::kAlignment) /
+                    MemoryChunk::kAlignment;
+  uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
+                    MemoryChunk::kAlignment;
+  for (uintptr_t key = start; key <= limit; key++) {
+    chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+  }
+}
 
 void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* previous = NULL;
   LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
     DCHECK(!Marking::IsGrey(mark_bit));
     if (Marking::IsBlack(mark_bit)) {
+      Address free_start;
+      if ((free_start = current->GetAddressToShrink()) != 0) {
+        // TODO(hpayer): Perform partial free concurrently.
+        current->ClearOutOfLiveRangeSlots(free_start);
+        RemoveChunkMapEntries(current, free_start);
+        heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+      }
       previous = current;
      current = current->next_page();
    } else {
@@ -3026,18 +3054,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       objects_size_ -= object->Size();
       page_count_--;
 
-      // Remove entries belonging to this page.
-      // Use variable alignment to help pass length check (<= 80 characters)
-      // of single line in tools/presubmit.py.
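
The chunk-map helpers added above key every MemoryChunk::kAlignment-aligned slice of a large page to its owning LargePage*, which is what lets FindPage() resolve an arbitrary interior address with a single hash probe. A small worked sketch of the key arithmetic; the alignment value is assumed for illustration (V8 uses MemoryChunk::kAlignment, whose value depends on the build):

#include <cstddef>
#include <cstdint>

// Assumed for illustration; in V8 this is MemoryChunk::kAlignment.
constexpr uintptr_t kAlignment = 512 * 1024;

uintptr_t FirstKey(uintptr_t page_start) { return page_start / kAlignment; }

uintptr_t LastKey(uintptr_t page_start, size_t page_size) {
  // Key of the last byte, so every covered aligned slice gets an entry.
  return (page_start + (page_size - 1)) / kAlignment;
}

uintptr_t KeyForAddress(uintptr_t addr) { return addr / kAlignment; }

// Example: a large page at 0x40000000 spanning 1 MiB covers keys 0x800
// and 0x801; KeyForAddress() of any interior address yields one of those
// keys and therefore finds the page in one lookup.

Note the RemoveChunkMapEntries(page, free_start) overload rounds free_start up before dividing, so the slice still covering live data keeps its entry while the released tail is unmapped.
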
-      const intptr_t alignment = MemoryChunk::kAlignment;
-      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
-      uintptr_t limit = base + (page->size() - 1) / alignment;
-      for (uintptr_t key = base; key <= limit; key++) {
-        chunk_map_.Remove(reinterpret_cast<void*>(key),
-                          static_cast<uint32_t>(key));
-      }
-
-      heap()->QueueMemoryChunkForFree(page);
+      RemoveChunkMapEntries(page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
 }
@@ -3103,7 +3121,6 @@ void LargeObjectSpace::Verify() {
 }
 #endif
 
-
 #ifdef DEBUG
 void LargeObjectSpace::Print() {
   OFStream os(stdout);
@@ -3115,7 +3132,7 @@ void LargeObjectSpace::Print() {
 
 
 void LargeObjectSpace::ReportStatistics() {
-  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
+  PrintF("  size: %" V8PRIdPTR "\n", size_);
   int num_objects = 0;
   ClearHistograms(heap()->isolate());
   LargeObjectIterator it(this);
@@ -3126,34 +3143,22 @@ void LargeObjectSpace::ReportStatistics() {
 
   PrintF(
       "  number of objects %d, "
-      "size of objects %" V8_PTR_PREFIX "d\n",
+      "size of objects %" V8PRIdPTR "\n",
      num_objects, objects_size_);
   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
 }
 
 
-void LargeObjectSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsAbstractCode()) {
-      AbstractCode* code = AbstractCode::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-  }
-}
-
-
 void Page::Print() {
   // Make a best-effort to print the objects in the page.
-  PrintF("Page@%p in %s\n", this->address(),
+  PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
          AllocationSpaceName(this->owner()->identity()));
   printf(" --------------------------------------\n");
   HeapObjectIterator objects(this);
   unsigned mark_size = 0;
   for (HeapObject* object = objects.Next(); object != NULL;
        object = objects.Next()) {
-    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
+    bool is_marked = Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object));
     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
     if (is_marked) {
       mark_size += object->Size();
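
The page-shrinking path introduced in this diff (GetAddressToShrink plus PartialFreeMemory) keeps only the committed prefix of a large page that covers its single object, rounded up to the OS commit granularity, and releases the tail. A hedged, self-contained sketch of that decision; the 4 KiB commit page size is an assumption for the example, where V8 queries base::OS::CommitPageSize() at runtime:

#include <cstddef>

// Assumed OS commit granularity (must be a power of two for the mask trick).
constexpr size_t kCommitPageSize = 4096;

size_t RoundUpTo(size_t value, size_t granularity) {
  return (value + granularity - 1) & ~(granularity - 1);
}

// Returns the offset at which the page can be shrunk, or 0 if it cannot.
// Mirrors the shape of LargePage::GetAddressToShrink(): keep the prefix
// covering header + object, release the rest.
size_t ShrinkOffset(size_t object_offset, size_t object_size,
                    size_t committed_size) {
  size_t used = RoundUpTo(object_offset + object_size, kCommitPageSize);
  return used < committed_size ? used : 0;
}

// Example: an object of 70 KiB starting 2 KiB into the page needs
// RoundUpTo(73728, 4096) = 73728 bytes (72 KiB) committed; with 128 KiB
// currently committed, the tail [72 KiB, 128 KiB) can go back to the OS.

Before the memory is handed back, ClearOutOfLiveRangeSlots() drops any remembered-set entries pointing into the released range, and RemoveChunkMapEntries() unregisters the freed slices, so no stale metadata can reference decommitted memory.
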