Diffstat (limited to 'deps/v8/src/wasm/wasm-memory.cc')
-rw-r--r--   deps/v8/src/wasm/wasm-memory.cc   93
1 file changed, 40 insertions, 53 deletions
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index fcbe60ae0e..38cd8973a6 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -24,6 +24,9 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+#else
+ static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+#endif
size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
DCHECK_GE(old_count + num_bytes, old_count);
@@ -31,7 +34,6 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
return true;
}
allocated_address_space_ -= num_bytes;
-#endif
return false;
}
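
The two hunks above make the address-space accounting unconditional: 64-bit targets keep the 1 TiB reservation budget, and 32-bit targets now get an explicit 2 GiB budget instead of skipping the check entirely (hence the removed trailing `#endif`). Below is a minimal standalone sketch of the same reserve/rollback pattern; the class and macro names are illustrative placeholders, not V8's, and the patch's DCHECK is folded into the success condition.

// Minimal standalone sketch of the reserve/rollback accounting above.
// Class and macro names are illustrative placeholders, not V8's.
#include <atomic>
#include <cstddef>

class AddressSpaceTracker {
 public:
  bool ReserveAddressSpace(size_t num_bytes) {
    size_t const old_count = reserved_.fetch_add(num_bytes);
    // The patch asserts the sum cannot wrap (DCHECK_GE); the sketch folds
    // that overflow check into the success condition instead.
    if (old_count + num_bytes >= old_count &&
        old_count + num_bytes <= kAddressSpaceLimit) {
      return true;
    }
    reserved_ -= num_bytes;  // Over the limit: roll the reservation back.
    return false;
  }

  void ReleaseAddressSpace(size_t num_bytes) { reserved_ -= num_bytes; }

 private:
#if defined(SKETCH_TARGET_64_BIT)  // placeholder for V8_TARGET_ARCH_64_BIT
  static constexpr size_t kAddressSpaceLimit = 0x10000000000L;  // 1 TiB
#else
  static constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
#endif
  std::atomic<size_t> reserved_{0};
};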
@@ -44,59 +46,42 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool require_guard_regions,
void** allocation_base,
size_t* allocation_length) {
- // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
- // systems. It may be safer to fail instead, given that other code might do
- // things that would be unsafe if they expected guard pages where there
- // weren't any.
- if (require_guard_regions) {
- // TODO(eholk): On Windows we want to make sure we don't commit the guard
- // pages yet.
-
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
- DCHECK_EQ(0, size % CommitPageSize());
-
- WasmAllocationTracker* const allocation_tracker =
- isolate->wasm_engine()->allocation_tracker();
-
- // Let the WasmAllocationTracker know we are going to reserve a bunch of
- // address space.
- if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
- // If we are over the address space limit, fail.
- return nullptr;
- }
-
- // The Reserve makes the whole region inaccessible by default.
- *allocation_base =
- isolate->array_buffer_allocator()->Reserve(*allocation_length);
- if (*allocation_base == nullptr) {
- allocation_tracker->ReleaseAddressSpace(*allocation_length);
- return nullptr;
- }
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ *allocation_length = require_guard_regions
+ ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+ : base::bits::RoundUpToPowerOfTwo32(RoundUp(
+ static_cast<uint32_t>(size), kWasmPageSize));
+ DCHECK_GE(*allocation_length, size);
+
+ WasmAllocationTracker* const allocation_tracker =
+ isolate->wasm_engine()->allocation_tracker();
+
+ // Let the WasmAllocationTracker know we are going to reserve a bunch of
+ // address space.
+ if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+ // If we are over the address space limit, fail.
+ return nullptr;
+ }
- void* memory = *allocation_base;
-
- // Make the part we care about accessible.
- isolate->array_buffer_allocator()->SetProtection(
- memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- return memory;
- } else {
- // TODO(titzer): use guard regions for minicage and merge with above code.
- CHECK_LE(size, kV8MaxWasmMemoryBytes);
- *allocation_length =
- base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
- void* memory =
- size == 0
- ? nullptr
- : isolate->array_buffer_allocator()->Allocate(*allocation_length);
- *allocation_base = memory;
- return memory;
+ // The Reserve makes the whole region inaccessible by default.
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base == nullptr) {
+ allocation_tracker->ReleaseAddressSpace(*allocation_length);
+ return nullptr;
}
+
+ void* memory = *allocation_base;
+
+ // Make the part we care about accessible.
+ CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
+ PageAllocator::kReadWrite));
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ return memory;
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
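
The rewritten TryAllocateBackingStore collapses the guard-region and non-guard-region paths: the allocation length is either the page-rounded kWasmMaxHeapOffset (full guard regions) or the next power of two of the page-rounded size, the whole range is reserved inaccessible with AllocatePages(..., kNoAccess), and only the first `size` bytes (rounded to wasm pages) are made read-write with SetPermissions. Here is a rough reserve-then-commit sketch of that idea using plain POSIX mmap/mprotect, since V8's internal page-allocator wrappers are not available outside the engine; the helper names and constants are assumptions for illustration only.

// Rough POSIX sketch of the reserve-then-commit approach; V8 itself goes
// through its internal AllocatePages/SetPermissions wrappers.
#include <sys/mman.h>
#include <cstddef>

constexpr size_t kSketchWasmPageSize = 0x10000;  // 64 KiB wasm pages.

// align must be a power of two.
inline size_t RoundUpTo(size_t value, size_t align) {
  return (value + align - 1) & ~(align - 1);
}

// Reserve `reservation` bytes with no access rights, then commit only the
// first `committed` bytes as read/write. Any access past the committed part
// faults, which is what makes the trailing region act as a guard.
void* ReserveAndCommit(size_t reservation, size_t committed) {
  void* base = mmap(nullptr, reservation, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  size_t accessible = RoundUpTo(committed, kSketchWasmPageSize);
  if (mprotect(base, accessible, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reservation);
    return nullptr;
  }
  return base;
}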
@@ -150,8 +135,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
#endif
constexpr bool is_external = false;
+ // All buffers have guard regions now, but sometimes they are small.
+ constexpr bool has_guard_region = true;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, require_guard_regions, shared);
+ size, is_external, has_guard_region, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
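
With the final hunk, NewArrayBuffer always passes has_guard_region = true, because even the small allocations leave an inaccessible tail past the committed pages. A hedged sketch of the sizing rule that makes this hold, assuming a 64-bit build; kSketchMaxHeapOffset is a placeholder, not V8's exact value, and the real code additionally page-rounds it.

// Sketch of the sizing rule implied by the patch: with full guard regions the
// reservation covers the maximal heap offset; otherwise the committed size is
// rounded up to wasm pages and then to a power of two, so even "small"
// buffers keep an inaccessible tail. Constants are placeholders.
#include <cstdint>

constexpr uint64_t kSketchWasmPageSize = 0x10000;             // 64 KiB
constexpr uint64_t kSketchMaxHeapOffset = uint64_t{1} << 33;  // placeholder bound

inline uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
  if (v <= 1) return 1;
  v--;
  v |= v >> 1;  v |= v >> 2;  v |= v >> 4;  v |= v >> 8;  v |= v >> 16;
  return v + 1;
}

// Assumes `size` is well below 4 GiB, as the engine's own max-memory checks
// guarantee before this point.
uint64_t ChooseAllocationLength(uint64_t size, bool full_guard_regions) {
  if (full_guard_regions) return kSketchMaxHeapOffset;
  uint64_t page_rounded =
      ((size + kSketchWasmPageSize - 1) / kSketchWasmPageSize) * kSketchWasmPageSize;
  return RoundUpToPowerOfTwo32(static_cast<uint32_t>(page_rounded));
}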