Diffstat (limited to 'deps/v8/src/heap/spaces.cc')
-rw-r--r--  deps/v8/src/heap/spaces.cc  163
1 file changed, 84 insertions(+), 79 deletions(-)
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index dd8ba30101..2c5d5c298d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -11,14 +11,14 @@
#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/template-utils.h"
#include "src/execution/vm-state-inl.h"
-#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -220,7 +220,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
requested));
heap_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
+ code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator, aligned_base, size,
static_cast<size_t>(MemoryChunk::kAlignment));
code_page_allocator_ = code_page_allocator_instance_.get();
@@ -286,7 +286,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
return;
}
- auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
+ auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
if (FLAG_trace_unmapper) {
PrintIsolate(heap_->isolate(),
"Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
@@ -699,6 +699,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
@@ -856,6 +857,33 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
+void Page::MoveOldToNewRememberedSetForSweeping() {
+ CHECK_NULL(sweeping_slot_set_);
+ sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
+ slot_set_[OLD_TO_NEW] = nullptr;
+}
+
+void Page::MergeOldToNewRememberedSets() {
+ if (sweeping_slot_set_ == nullptr) return;
+
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ this,
+ [this](MaybeObjectSlot slot) {
+ Address address = slot.address();
+ RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (slot_set_[OLD_TO_NEW]) {
+ ReleaseSlotSet<OLD_TO_NEW>();
+ }
+
+ CHECK_NULL(slot_set_[OLD_TO_NEW]);
+ slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
+ sweeping_slot_set_ = nullptr;
+}
+
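
These two helpers implement a hand-off: MoveOldToNewRememberedSetForSweeping gives the sweeper exclusive ownership of the current OLD_TO_NEW set, and MergeOldToNewRememberedSets later folds any slots the mutator recorded in the meantime back into a single set. A minimal standalone sketch of the same ownership dance, using a std::set stand-in rather than V8's real SlotSet (all names below are illustrative, not V8 types):

    #include <cassert>
    #include <cstdint>
    #include <set>

    // Illustrative stand-in for V8's SlotSet: a set of slot addresses.
    using FakeSlotSet = std::set<uintptr_t>;

    struct FakePage {
      FakeSlotSet* old_to_new = nullptr;  // written by the mutator
      FakeSlotSet* sweeping = nullptr;    // owned by the sweeper task

      // At sweep start: hand the current OLD_TO_NEW set to the sweeper.
      void MoveOldToNewForSweeping() {
        assert(sweeping == nullptr);
        sweeping = old_to_new;
        old_to_new = nullptr;
      }

      // After sweeping: fold interim slots into one set and reinstate it.
      void MergeOldToNew() {
        if (sweeping == nullptr) return;
        if (old_to_new != nullptr) {
          sweeping->insert(old_to_new->begin(), old_to_new->end());
          delete old_to_new;
          old_to_new = nullptr;
        }
        old_to_new = sweeping;
        sweeping = nullptr;
      }
    };
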
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
@@ -1376,6 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
}
ReleaseSlotSet<OLD_TO_NEW>();
+ ReleaseSlotSet(&sweeping_slot_set_);
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
@@ -1399,11 +1428,7 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK_LT(0, pages);
- SlotSet* slot_set = new SlotSet[pages];
- for (size_t i = 0; i < pages; i++) {
- slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
- }
- return slot_set;
+ return new SlotSet[pages];
}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
@@ -1411,15 +1436,23 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
- SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
+ return AllocateSlotSet(&slot_set_[type]);
+}
+
+SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
+ return AllocateSlotSet(&sweeping_slot_set_);
+}
+
+SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
+ SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
- &slot_set_[type], nullptr, slot_set);
+ slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) {
- delete[] slot_set;
- slot_set = old_slot_set;
+ delete[] new_slot_set;
+ new_slot_set = old_slot_set;
}
- DCHECK(slot_set);
- return slot_set;
+ DCHECK(new_slot_set);
+ return new_slot_set;
}
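
The refactor routes both remembered-set types and the new sweeping set through one CAS-publishing allocator: every racing thread allocates a candidate, exactly one compare-and-swap wins, and losers delete their copy and adopt the winner's. The same lock-free lazy-initialization pattern in a self-contained sketch (std::atomic stands in for base::AsAtomicPointer here; that substitution is this sketch's assumption):

    #include <atomic>

    // Lock-free lazy initialization via compare-exchange: any thread may
    // call GetOrCreate concurrently, exactly one allocation survives, and
    // every caller ends up with the same published pointer.
    template <typename T>
    T* GetOrCreate(std::atomic<T*>& slot) {
      T* existing = slot.load(std::memory_order_acquire);
      if (existing != nullptr) return existing;

      T* fresh = new T();
      T* expected = nullptr;
      if (slot.compare_exchange_strong(expected, fresh,
                                       std::memory_order_acq_rel,
                                       std::memory_order_acquire)) {
        return fresh;  // we won the race and published our allocation
      }
      delete fresh;    // another thread won; discard ours
      return expected; // the winner's pointer was loaded into 'expected'
    }
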
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
@@ -1427,10 +1460,13 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
- SlotSet* slot_set = slot_set_[type];
- if (slot_set) {
- slot_set_[type] = nullptr;
- delete[] slot_set;
+ ReleaseSlotSet(&slot_set_[type]);
+}
+
+void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
+ if (*slot_set) {
+ delete[] * slot_set;
+ *slot_set = nullptr;
}
}
@@ -1484,15 +1520,12 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
}
template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
- int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
- int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
- int size) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
@@ -1509,27 +1542,17 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
AllocateInvalidatedSlots<type>();
}
- InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
- InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
+ invalidated_slots<type>()->insert(object);
+}
- if (it != invalidated_slots->end() && it->first == object) {
- // object was already inserted
- CHECK_LE(size, it->second);
- return;
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+ if (heap()->incremental_marking()->IsCompacting()) {
+ // We cannot check slot_set_[OLD_TO_OLD] here, since the
+ // concurrent markers might insert slots concurrently.
+ RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
- it = invalidated_slots->insert(it, std::make_pair(object, size));
-
- // prevent overlapping invalidated objects for old-to-new.
- if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
- HeapObject pred = (--it)->first;
- int pred_size = it->second;
- DCHECK_LT(pred.address(), object.address());
-
- if (pred.address() + pred_size > object.address()) {
- it->second = static_cast<int>(object.address() - pred.address());
- }
- }
+ RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
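
Dropping the size argument turns invalidated-slot registration from ordered-map surgery (lower_bound, overlap trimming against the predecessor) into a bare set insert. A simplified sketch of the new shape; the container and names are illustrative, not V8's actual InvalidatedSlots type:

    #include <cstdint>
    #include <set>

    // An "object" is represented by its start address in this sketch.
    using ObjectAddress = uintptr_t;

    struct InvalidatedObjects {
      std::set<ObjectAddress> objects;

      // Registration no longer tracks sizes or trims overlapping
      // neighbors; it is a single insert, idempotent when repeated.
      void Register(ObjectAddress object) { objects.insert(object); }

      // The query the GC performs is a pure membership test.
      bool IsRegistered(ObjectAddress object) const {
        return objects.count(object) != 0;
      }
    };
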
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
@@ -1546,27 +1569,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
invalidated_slots<type>()->end();
}
-template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
- HeapObject old_start, HeapObject new_start);
-
-template <RememberedSetType type>
-void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
- HeapObject new_start) {
- DCHECK_LT(old_start, new_start);
- DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
- MemoryChunk::FromHeapObject(new_start));
- static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
- if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
- auto it = invalidated_slots<type>()->find(old_start);
- if (it != invalidated_slots<type>()->end()) {
- int old_size = it->second;
- int delta = static_cast<int>(new_start.address() - old_start.address());
- invalidated_slots<type>()->erase(it);
- (*invalidated_slots<type>())[new_start] = old_size - delta;
- }
- }
-}
-
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
@@ -1657,6 +1659,7 @@ void PagedSpace::RefillFreeList() {
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
+
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
@@ -1667,6 +1670,15 @@ void PagedSpace::RefillFreeList() {
category->Reset(free_list());
});
}
+
+ // Also merge old-to-new remembered sets outside of collections.
+ // Do not do this during GC, because of races during scavenges.
+  // One thread might iterate a remembered set while another thread
+  // merges it.
+ if (!is_local()) {
+ p->MergeOldToNewRememberedSets();
+ }
+
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
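
Note the ordering this comment pins down: the merge may only run on the main thread, outside a scavenge, once sweeping of the page has finished. Reusing the FakePage sketch from earlier, the full cycle looks roughly like this (illustrative only, not V8's API):

    // Illustrative timeline built on the FakePage sketch above.
    void SweepCycle(FakePage& page) {
      // 1. GC start: the sweeper takes exclusive ownership of the set.
      page.MoveOldToNewForSweeping();

      // 2. While the sweeper prunes page.sweeping on its own thread, the
      //    mutator records fresh slots into a brand-new page.old_to_new.

      // 3. Only after sweeping has finished, and never while a scavenge
      //    is iterating the sets, fold the two back into one.
      page.MergeOldToNew();
    }
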
@@ -1709,6 +1721,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
+
+ p->MergeOldToNewRememberedSets();
+
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
@@ -1883,19 +1898,8 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
// Generated code may allocate inline from the linear allocation area. To
// make sure we can observe these allocations, we use a lower limit.
size_t step = GetNextInlineAllocationStepSize();
-
- // TODO(ofrobots): there is subtle difference between old space and new
- // space here. Any way to avoid it? `step - 1` makes more sense as we would
- // like to sample the object that straddles the `start + step` boundary.
- // Rounding down further would introduce a small statistical error in
- // sampling. However, presently PagedSpace requires limit to be aligned.
- size_t rounded_step;
- if (identity() == NEW_SPACE) {
- DCHECK_GE(step, 1);
- rounded_step = step - 1;
- } else {
- rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
- }
+ size_t rounded_step =
+ RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
return Min(static_cast<Address>(start + min_size + rounded_step), end);
} else {
// The entire node can be used as the linear allocation area.
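
With the per-space special case gone, both spaces now round step - 1 down to object alignment, so the object straddling the start + step boundary still lands inside the sampled window. A quick worked example of the arithmetic, assuming 8-byte alignment (the real value comes from RoundSizeDownToObjectAlignment and varies by space):

    #include <cstddef>
    #include <cstdio>

    // Round down to an assumed 8-byte object alignment.
    constexpr size_t kAlignment = 8;
    constexpr size_t RoundDown(size_t n) { return n & ~(kAlignment - 1); }

    int main() {
      size_t step = 128;
      size_t rounded_step = RoundDown(step - 1);  // 127 rounds down to 120
      // limit = min(start + min_size + rounded_step, end); keeping the
      // step slightly short of 128 ensures the allocation that crosses
      // the boundary is still observed by the allocation observers.
      std::printf("rounded_step = %zu\n", rounded_step);
      return 0;
    }
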
@@ -2139,7 +2143,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer.byte_length();
+ size_t size = PerIsolateAccountingLength(array_buffer);
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2628,7 +2632,7 @@ void NewSpace::Verify(Isolate* isolate) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer.byte_length();
+ size_t size = PerIsolateAccountingLength(array_buffer);
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -3942,6 +3946,7 @@ Address LargePage::GetAddressToShrink(Address object_address,
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+ DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),