path: root/deps/v8/src/allocation.cc
Diffstat (limited to 'deps/v8/src/allocation.cc')
-rw-r--r--  deps/v8/src/allocation.cc | 70
1 file changed, 43 insertions(+), 27 deletions(-)
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 6327a9c965..4be8fb4084 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -11,8 +11,9 @@
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
-#include "src/utils.h"
+#include "src/memcopy.h"
#include "src/v8.h"
+#include "src/vector.h"
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
@@ -37,29 +38,34 @@ void* AlignedAllocInternal(size_t size, size_t alignment) {
return ptr;
}
-// TODO(bbudge) Simplify this once all embedders implement a page allocator.
-struct InitializePageAllocator {
- static void Construct(void* page_allocator_ptr_arg) {
- auto page_allocator_ptr =
- reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
- v8::PageAllocator* page_allocator =
- V8::GetCurrentPlatform()->GetPageAllocator();
- if (page_allocator == nullptr) {
- static v8::base::PageAllocator default_allocator;
- page_allocator = &default_allocator;
+class PageAllocatorInitializer {
+ public:
+ PageAllocatorInitializer() {
+ page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
+ if (page_allocator_ == nullptr) {
+ static base::LeakyObject<base::PageAllocator> default_page_allocator;
+ page_allocator_ = default_page_allocator.get();
}
#if defined(LEAK_SANITIZER)
- {
- static v8::base::LsanPageAllocator lsan_allocator(page_allocator);
- page_allocator = &lsan_allocator;
- }
+ static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
+ page_allocator_);
+ page_allocator_ = lsan_allocator.get();
#endif
- *page_allocator_ptr = page_allocator;
}
+
+ PageAllocator* page_allocator() const { return page_allocator_; }
+
+ void SetPageAllocatorForTesting(PageAllocator* allocator) {
+ page_allocator_ = allocator;
+ }
+
+ private:
+ PageAllocator* page_allocator_;
};
-static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
- page_allocator = LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
+ GetPageTableInitializer);
+
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
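
Note: the hunk above replaces base::LazyInstance with V8's leaky-singleton helpers. A minimal sketch of the pattern, assuming the semantics of base::LeakyObject and DEFINE_LAZY_LEAKY_OBJECT_GETTER from src/base/lazy-instance.h (LeakyObjectSketch below is an illustrative name, not V8's):

#include <new>
#include <utility>

// Constructed on first use via placement new into static storage and
// intentionally never destroyed, which avoids exit-time
// destruction-order problems for process-wide singletons.
template <typename T>
class LeakyObjectSketch {
 public:
  template <typename... Args>
  explicit LeakyObjectSketch(Args&&... args) {
    new (&storage_) T(std::forward<Args>(args)...);
  }
  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  alignas(T) char storage_[sizeof(T)];
};

// The getter macro then boils down to a function-local static, whose
// first-use initialization is thread-safe in C++11:
//   PageAllocatorInitializer* GetPageTableInitializer() {
//     static LeakyObjectSketch<PageAllocatorInitializer> object;
//     return object.get();
//   }
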
@@ -67,8 +73,15 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
- DCHECK_NOT_NULL(page_allocator.Get());
- return page_allocator.Get();
+ DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
+ return GetPageTableInitializer()->page_allocator();
+}
+
+v8::PageAllocator* SetPlatformPageAllocatorForTesting(
+ v8::PageAllocator* new_page_allocator) {
+ v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
+ GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
+ return old_page_allocator;
}
void* Malloced::New(size_t size) {
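
Note: the new SetPlatformPageAllocatorForTesting returns the previous allocator so tests can swap in an instrumented one and restore it afterwards. A hypothetical usage sketch (CountingPageAllocator is an illustrative test double implementing the v8::PageAllocator interface, not part of this change):

void ExerciseWithTestAllocator() {
  CountingPageAllocator counting_allocator;  // hypothetical test double
  v8::PageAllocator* old_allocator =
      SetPlatformPageAllocatorForTesting(&counting_allocator);
  // ... run code that allocates through GetPlatformPageAllocator() ...
  SetPlatformPageAllocatorForTesting(old_allocator);  // restore the original
}
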
@@ -111,7 +124,7 @@ void* AllocWithRetry(size_t size) {
}
void* AlignedAlloc(size_t size, size_t alignment) {
- DCHECK_LE(V8_ALIGNOF(void*), alignment);
+ DCHECK_LE(alignof(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo(alignment));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
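
Note: AlignedAlloc (and AllocWithRetry above it) follow the retry protocol described by the kAllocationTries comment earlier in the diff. A condensed sketch, with alloc_fn standing in for malloc or AlignedAllocInternal:

#include <cstddef>

// Try the allocation kAllocationTries (= 2) times; between attempts the
// real code calls OnCriticalMemoryPressure() to try to free some memory.
void* AllocWithRetrySketch(size_t size, void* (*alloc_fn)(size_t)) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = alloc_fn(size);
    if (result != nullptr) break;
    // OnCriticalMemoryPressure() goes here in the real implementation.
  }
  return result;
}
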
@@ -155,7 +168,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(address, AlignedAddress(address, alignment));
- DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
+ DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result = page_allocator->AllocatePages(address, size, alignment, access);
@@ -169,7 +182,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
bool FreePages(v8::PageAllocator* page_allocator, void* address,
const size_t size) {
DCHECK_NOT_NULL(page_allocator);
- DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
+ DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
return page_allocator->FreePages(address, size);
}
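
Note: the IsAligned rewrites here and above are behavior-preserving. For a power-of-two page size, masking the low bits is exactly an alignment test; a stand-in for V8's helper:

#include <cstddef>

// Equivalent to the old DCHECK_EQ(0UL, size & (page_size - 1)) form,
// assuming alignment is a power of two.
inline bool IsAlignedSketch(size_t value, size_t alignment) {
  return (value & (alignment - 1)) == 0;
}
// e.g. IsAlignedSketch(128 * 1024, 64 * 1024) == true
//      IsAlignedSketch(128 * 1024 + 4096, 64 * 1024) == false
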
@@ -177,6 +190,7 @@ bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
size_t new_size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
+ DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
return page_allocator->ReleasePages(address, size, new_size);
}
@@ -209,12 +223,14 @@ VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
void* hint, size_t alignment)
: page_allocator_(page_allocator) {
DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
size_t page_size = page_allocator_->AllocatePageSize();
alignment = RoundUp(alignment, page_size);
- size = RoundUp(size, page_size);
- Address address = reinterpret_cast<Address>(AllocatePages(
- page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
+ Address address = reinterpret_cast<Address>(
+ AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
+ PageAllocator::kNoAccess));
if (address != kNullAddress) {
+ DCHECK(IsAligned(address, alignment));
region_ = base::AddressRegion(address, size);
}
}
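
Note: the constructor now records the caller's requested size in region_ while still rounding the reservation itself up to the allocation page size. A worked example of the new arithmetic, assuming a 4 KiB commit page and a 64 KiB allocation page (the sizes are illustrative):

#include <cstddef>

constexpr size_t kCommitPageSize = 4 * 1024;     // assumed CommitPageSize()
constexpr size_t kAllocatePageSize = 64 * 1024;  // assumed AllocatePageSize()

constexpr size_t RoundUpSketch(size_t x, size_t multiple) {
  return (x + multiple - 1) & ~(multiple - 1);  // multiple: power of two
}

static_assert(16 * 1024 % kCommitPageSize == 0,
              "request is commit-page aligned, per the new DCHECK");
static_assert(RoundUpSketch(16 * 1024, kAllocatePageSize) == 64 * 1024,
              "a 16 KiB request reserves one 64 KiB allocation page");
// region_ still records the requested 16 KiB; before this change `size`
// itself was rounded first, so region_ would have recorded 64 KiB.
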
@@ -241,7 +257,7 @@ bool VirtualMemory::SetPermissions(Address address, size_t size,
size_t VirtualMemory::Release(Address free_start) {
DCHECK(IsReserved());
- DCHECK(IsAddressAligned(free_start, page_allocator_->CommitPageSize()));
+ DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.