path: root/deps/v8/src/heap/heap-inl.h
Diffstat (limited to 'deps/v8/src/heap/heap-inl.h')
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 290
1 file changed, 147 insertions(+), 143 deletions(-)
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 65b791a42f..d617d9f9ac 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -15,23 +15,26 @@
#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
-#include "src/counters-inl.h"
#include "src/feedback-vector.h"
// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/heap/spaces-inl.h"
+#include "src/isolate-data.h"
#include "src/isolate.h"
-#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
-#include "src/objects/literal-objects.h"
-#include "src/objects/microtask-queue-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"
@@ -50,32 +53,82 @@ AllocationSpace AllocationResult::RetrySpace() {
return static_cast<AllocationSpace>(Smi::ToInt(object_));
}
-HeapObject* AllocationResult::ToObjectChecked() {
+HeapObject AllocationResult::ToObjectChecked() {
CHECK(!IsRetry());
return HeapObject::cast(object_);
}
-#define ROOT_ACCESSOR(type, name, CamelName) \
- type* Heap::name() { return type::cast(roots_[RootIndex::k##CamelName]); }
+Isolate* Heap::isolate() {
+ return reinterpret_cast<Isolate*>(
+ reinterpret_cast<intptr_t>(this) -
+ reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
+}
+
+int64_t Heap::external_memory() {
+ return isolate()->isolate_data()->external_memory_;
+}
+
+void Heap::update_external_memory(int64_t delta) {
+ isolate()->isolate_data()->external_memory_ += delta;
+}
+
+void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
+ external_memory_concurrently_freed_ += freed;
+}
+
+void Heap::account_external_memory_concurrently_freed() {
+ isolate()->isolate_data()->external_memory_ -=
+ external_memory_concurrently_freed_;
+ external_memory_concurrently_freed_ = 0;
+}
+
+RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
+
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type Heap::name() { \
+ return Type::cast(Object(roots_table()[RootIndex::k##CamelName])); \
+ }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
#define ROOT_ACCESSOR(type, name, CamelName) \
- void Heap::set_##name(type* value) { \
+ void Heap::set_##name(type value) { \
/* The deserializer makes use of the fact that these common roots are */ \
/* never in new space and never on a page that is being compacted. */ \
- DCHECK(!deserialization_complete() || \
- RootCanBeWrittenAfterInitialization(RootIndex::k##CamelName)); \
- DCHECK_IMPLIES(static_cast<int>(RootIndex::k##CamelName) < kOldSpaceRoots, \
- !InNewSpace(value)); \
- roots_[RootIndex::k##CamelName] = value; \
+ DCHECK_IMPLIES(deserialization_complete(), \
+ !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
+ DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName), \
+ IsImmovable(HeapObject::cast(value))); \
+ roots_table()[RootIndex::k##CamelName] = value->ptr(); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+void Heap::SetRootMaterializedObjects(FixedArray objects) {
+ roots_table()[RootIndex::kMaterializedObjects] = objects->ptr();
+}
+
+void Heap::SetRootScriptList(Object value) {
+ roots_table()[RootIndex::kScriptList] = value->ptr();
+}
+
+void Heap::SetRootStringTable(StringTable value) {
+ roots_table()[RootIndex::kStringTable] = value->ptr();
+}
+
+void Heap::SetRootNoScriptSharedFunctionInfos(Object value) {
+ roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value->ptr();
+}
+
+void Heap::SetMessageListeners(TemplateList value) {
+ roots_table()[RootIndex::kMessageListeners] = value->ptr();
+}
+
PagedSpace* Heap::paged_space(int idx) {
DCHECK_NE(idx, LO_SPACE);
DCHECK_NE(idx, NEW_SPACE);
+ DCHECK_NE(idx, CODE_LO_SPACE);
+ DCHECK_NE(idx, NEW_LO_SPACE);
return static_cast<PagedSpace*>(space_[idx]);
}
@@ -118,24 +171,19 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
}
#endif
#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
+ IncrementObjectCounters();
#endif
bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
- bool new_large_object = FLAG_young_generation_large_objects &&
- size_in_bytes > kMaxNewSpaceHeapObjectSize;
- HeapObject* object = nullptr;
+
+ HeapObject object;
AllocationResult allocation;
if (NEW_SPACE == space) {
if (large_object) {
- space = LO_SPACE;
+ // TODO(hpayer): Implement a LO tenuring strategy.
+ space = FLAG_young_generation_large_objects ? NEW_LO_SPACE : LO_SPACE;
} else {
- if (new_large_object) {
- allocation = new_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
- }
+ allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
}
@@ -146,19 +194,25 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
// Here we only allocate in the old generation.
if (OLD_SPACE == space) {
if (large_object) {
- allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
}
} else if (CODE_SPACE == space) {
- if (size_in_bytes <= code_space()->AreaSize()) {
+ if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
- allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+ allocation = code_lo_space_->AllocateRaw(size_in_bytes);
}
} else if (LO_SPACE == space) {
DCHECK(large_object);
- allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
+ } else if (NEW_LO_SPACE == space) {
+ DCHECK(FLAG_young_generation_large_objects);
+ allocation = new_lo_space_->AllocateRaw(size_in_bytes);
+ } else if (CODE_LO_SPACE == space) {
+ DCHECK(large_object);
+ allocation = code_lo_space_->AllocateRaw(size_in_bytes);
} else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (RO_SPACE == space) {
@@ -186,7 +240,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
return allocation;
}
-void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
for (auto& tracker : allocation_trackers_) {
tracker->AllocationEvent(object->address(), size_in_bytes);
}
@@ -212,46 +266,13 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
}
}
-
-void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
- int size_in_bytes) {
- HeapProfiler* heap_profiler = isolate_->heap_profiler();
- if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(source->address(), target->address(),
- size_in_bytes);
- }
- for (auto& tracker : allocation_trackers_) {
- tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
- }
- if (target->IsSharedFunctionInfo()) {
- LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
- target->address()));
- }
-
- if (FLAG_verify_predictable) {
- ++allocations_count_;
- // Advance synthetic time by making a time request.
- MonotonicallyIncreasingTimeInMs();
-
- UpdateAllocationsHash(source);
- UpdateAllocationsHash(target);
- UpdateAllocationsHash(size_in_bytes);
-
- if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAllocationsHash();
- }
- } else if (FLAG_fuzzer_gc_analysis) {
- ++allocations_count_;
- }
-}
-
bool Heap::CanAllocateInReadOnlySpace() {
return !deserialization_complete_ &&
(isolate()->serializer_enabled() ||
!isolate()->initialized_from_snapshot());
}
-void Heap::UpdateAllocationsHash(HeapObject* object) {
+void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object->address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner()->identity();
@@ -264,7 +285,6 @@ void Heap::UpdateAllocationsHash(HeapObject* object) {
UpdateAllocationsHash(value);
}
-
void Heap::UpdateAllocationsHash(uint32_t value) {
uint16_t c1 = static_cast<uint16_t>(value);
uint16_t c2 = static_cast<uint16_t>(value >> 16);
@@ -274,14 +294,13 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}
-
-void Heap::RegisterExternalString(String* string) {
+void Heap::RegisterExternalString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!string->IsThinString());
external_string_table_.AddString(string);
}
-void Heap::UpdateExternalString(String* string, size_t old_payload,
+void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
@@ -294,10 +313,10 @@ void Heap::UpdateExternalString(String* string, size_t old_payload,
ExternalBackingStoreType::kExternalString, new_payload - old_payload);
}
-void Heap::FinalizeExternalString(String* string) {
+void Heap::FinalizeExternalString(String string) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
- ExternalString* ext_string = ExternalString::cast(string);
+ ExternalString ext_string = ExternalString::cast(string);
page->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
@@ -305,8 +324,7 @@ void Heap::FinalizeExternalString(String* string) {
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
- kHeapObjectTag);
+ string->address() + ExternalString::kResourceOffset);
// Dispose of the C++ object if it has not already been disposed.
if (*resource_addr != nullptr) {
@@ -318,19 +336,19 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
// static
-bool Heap::InNewSpace(Object* object) {
+bool Heap::InNewSpace(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
}
// static
-bool Heap::InNewSpace(MaybeObject* object) {
- HeapObject* heap_object;
+bool Heap::InNewSpace(MaybeObject object) {
+ HeapObject heap_object;
return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
}
// static
-bool Heap::InNewSpace(HeapObject* heap_object) {
+bool Heap::InNewSpace(HeapObject heap_object) {
// Inlined check from NewSpace::Contains.
bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
#ifdef DEBUG
@@ -346,56 +364,48 @@ bool Heap::InNewSpace(HeapObject* heap_object) {
}
// static
-bool Heap::InFromSpace(Object* object) {
+bool Heap::InFromSpace(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}
// static
-bool Heap::InFromSpace(MaybeObject* object) {
- HeapObject* heap_object;
+bool Heap::InFromSpace(MaybeObject object) {
+ HeapObject heap_object;
return object->GetHeapObject(&heap_object) && InFromSpace(heap_object);
}
// static
-bool Heap::InFromSpace(HeapObject* heap_object) {
+bool Heap::InFromSpace(HeapObject heap_object) {
return MemoryChunk::FromHeapObject(heap_object)
->IsFlagSet(Page::IN_FROM_SPACE);
}
// static
-bool Heap::InToSpace(Object* object) {
+bool Heap::InToSpace(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
}
// static
-bool Heap::InToSpace(MaybeObject* object) {
- HeapObject* heap_object;
+bool Heap::InToSpace(MaybeObject object) {
+ HeapObject heap_object;
return object->GetHeapObject(&heap_object) && InToSpace(heap_object);
}
// static
-bool Heap::InToSpace(HeapObject* heap_object) {
+bool Heap::InToSpace(HeapObject heap_object) {
return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
}
-bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
+bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
-bool Heap::InReadOnlySpace(Object* object) {
+bool Heap::InReadOnlySpace(Object object) {
return read_only_space_->Contains(object);
}
-bool Heap::InNewSpaceSlow(Address address) {
- return new_space_->ContainsSlow(address);
-}
-
-bool Heap::InOldSpaceSlow(Address address) {
- return old_space_->ContainsSlow(address);
-}
-
// static
-Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
+Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
@@ -415,27 +425,29 @@ bool Heap::ShouldBePromoted(Address old_address) {
}
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
- static_cast<size_t>(byte_size / kPointerSize));
+ DCHECK(IsAligned(byte_size, kTaggedSize));
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ CopyWords(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}
template <Heap::FindMementoMode mode>
-AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
+AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
Address object_address = object->address();
Address memento_address = object_address + object->SizeFromMap(map);
- Address last_memento_word_address = memento_address + kPointerSize;
+ Address last_memento_word_address = memento_address + kTaggedSize;
// If the memento would be on another page, bail out immediately.
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
- return nullptr;
+ return AllocationMemento();
}
- HeapObject* candidate = HeapObject::FromAddress(memento_address);
- Map* candidate_map = candidate->map();
+ HeapObject candidate = HeapObject::FromAddress(memento_address);
+ MapWordSlot candidate_map_slot = candidate->map_slot();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
- MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
- if (candidate_map != ReadOnlyRoots(this).allocation_memento_map()) {
- return nullptr;
+ MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
+ if (!candidate_map_slot.contains_value(
+ ReadOnlyRoots(this).allocation_memento_map().ptr())) {
+ return AllocationMemento();
}
// Bail out if the memento is below the age mark, which can happen when
@@ -445,15 +457,15 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
Address age_mark =
reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
if (!object_page->Contains(age_mark)) {
- return nullptr;
+ return AllocationMemento();
}
// Do an exact check in the case where the age mark is on the same page.
if (object_address < age_mark) {
- return nullptr;
+ return AllocationMemento();
}
}
- AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
+ AllocationMemento memento_candidate = AllocationMemento::cast(candidate);
// Depending on what the memento is used for, we might need to perform
// additional checks.
@@ -462,7 +474,7 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
case Heap::kForGC:
return memento_candidate;
case Heap::kForRuntime:
- if (memento_candidate == nullptr) return nullptr;
+ if (memento_candidate.is_null()) return AllocationMemento();
// Either the object is the last object in the new space, or there is
// another object of at least word size (the header map word) following
// it, so suffices to compare ptr and top here.
@@ -473,43 +485,37 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
if ((memento_address != top) && memento_candidate->IsValid()) {
return memento_candidate;
}
- return nullptr;
+ return AllocationMemento();
default:
UNREACHABLE();
}
UNREACHABLE();
}
-void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
+void Heap::UpdateAllocationSite(Map map, HeapObject object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
- DCHECK(
- InFromSpace(object) ||
- (InToSpace(object) && Page::FromAddress(object->address())
- ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
- (!InNewSpace(object) && Page::FromAddress(object->address())
- ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
+ DCHECK(InFromSpace(object) ||
+ (InToSpace(object) && Page::FromHeapObject(object)->IsFlagSet(
+ Page::PAGE_NEW_NEW_PROMOTION)) ||
+ (!InNewSpace(object) && Page::FromHeapObject(object)->IsFlagSet(
+ Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
- !AllocationSite::CanTrack(map->instance_type()))
+ !AllocationSite::CanTrack(map->instance_type())) {
return;
- AllocationMemento* memento_candidate =
+ }
+ AllocationMemento memento_candidate =
FindAllocationMemento<kForGC>(map, object);
- if (memento_candidate == nullptr) return;
+ if (memento_candidate.is_null()) return;
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and rather have to postpone all checks
// till actually merging the data.
Address key = memento_candidate->GetAllocationSiteUnchecked();
- (*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
+ (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}
-Isolate* Heap::isolate() {
- return reinterpret_cast<Isolate*>(
- reinterpret_cast<intptr_t>(this) -
- reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
-}
-
-void Heap::ExternalStringTable::AddString(String* string) {
+void Heap::ExternalStringTable::AddString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!Contains(string));
@@ -520,14 +526,15 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-Oddball* Heap::ToBoolean(bool condition) {
+Oddball Heap::ToBoolean(bool condition) {
ReadOnlyRoots roots(this);
return condition ? roots.true_value() : roots.false_value();
}
uint64_t Heap::HashSeed() {
uint64_t seed;
- hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
+ ReadOnlyRoots(this).hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed),
+ kInt64Size);
DCHECK(FLAG_randomize_hashes || seed == 0);
return seed;
}
@@ -595,12 +602,11 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
heap_->code_space()->SetReadAndWritable();
- LargePage* page = heap_->lo_space()->first_page();
+ LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
- if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
- }
+ DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndWritable();
page = page->next_page();
}
}
@@ -609,13 +615,12 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
if (heap_->write_protect_code_memory()) {
heap_->decrement_code_space_memory_modification_scope_depth();
- heap_->code_space()->SetReadAndExecutable();
- LargePage* page = heap_->lo_space()->first_page();
+ heap_->code_space()->SetDefaultCodePermissions();
+ LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
- if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndExecutable();
- }
+ DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetDefaultCodePermissions();
page = page->next_page();
}
}
@@ -646,15 +651,14 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
- (chunk_->owner()->identity() == LO_SPACE &&
- chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
+ (chunk_->owner()->identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
if (scope_active_) {
- chunk_->SetReadAndExecutable();
+ chunk_->SetDefaultCodePermissions();
}
}