Diffstat (limited to 'deps/v8/src/wasm/wasm-memory.cc')
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc  150
1 file changed, 79 insertions(+), 71 deletions(-)
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index bda06e42cd..f7cc70a9e7 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -4,10 +4,12 @@
#include <limits>
-#include "src/wasm/wasm-memory.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
@@ -18,6 +20,12 @@ namespace {
constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
+void AddAllocationStatusSample(Isolate* isolate,
+ WasmMemoryTracker::AllocationStatus status) {
+ isolate->counters()->wasm_memory_allocation_result()->AddSample(
+ static_cast<int>(status));
+}
+
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
size_t size, bool require_full_guard_regions,
void** allocation_base,
@@ -31,6 +39,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
//
// To protect against 32-bit integer overflow issues, we also protect the 2GiB
// before the valid part of the memory buffer.
+ // TODO(7881): do not use static_cast<uint32_t>() here
*allocation_length =
require_full_guard_regions
? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
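// A worked sketch of the guard-region arithmetic above; the exact value of
// kWasmMaxHeapOffset is assumed here for illustration, not taken from this
// patch. A 32-bit index plus a 32-bit offset can reach roughly 8 GiB past the
// base, and the 2 GiB negative guard catches underflow, so each full-guard
// reservation costs on the order of 10 GiB of address space, nearly all of
// it mapped kNoAccess:
//
//   constexpr size_t kAssumedMaxHeapOffset = size_t{1} << 33;  // 8 GiB
//   constexpr size_t kAssumedReservation =
//       kAssumedMaxHeapOffset + kNegativeGuardSize;            // 10 GiB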
@@ -43,37 +52,45 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// Let the WasmMemoryTracker know we are going to reserve a bunch of
// address space.
// Try up to three times; getting rid of dead JSArrayBuffer allocations might
- // require two GCs.
- // TODO(gc): Fix this to only require one GC (crbug.com/v8/7621).
+ // require two GCs because the first GC may be incremental and may have
+ // floating garbage.
+ static constexpr int kAllocationRetries = 2;
bool did_retry = false;
for (int trial = 0;; ++trial) {
if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
- // Collect garbage and retry.
- heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
did_retry = true;
// After first and second GC: retry.
- if (trial < 2) continue;
- // We are over the address space limit. Fail.
- //
- // When running under the correctness fuzzer (i.e.
- // --abort-on-stack-or-string-length-overflow is preset), we crash instead
- // so it is not incorrectly reported as a correctness violation. See
- // https://crbug.com/828293#c4
- if (FLAG_abort_on_stack_or_string_length_overflow) {
- FATAL("could not allocate wasm memory");
+ if (trial == kAllocationRetries) {
+ // We are over the address space limit. Fail.
+ //
+ // When running under the correctness fuzzer (i.e.
+ // --abort-on-stack-or-string-length-overflow is preset), we crash instead
+ // so it is not incorrectly reported as a correctness violation. See
+ // https://crbug.com/828293#c4
+ if (FLAG_abort_on_stack_or_string_length_overflow) {
+ FATAL("could not allocate wasm memory");
+ }
+ AddAllocationStatusSample(
+ heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
+ return nullptr;
}
- memory_tracker->AddAllocationStatusSample(
- AllocationStatus::kAddressSpaceLimitReachedFailure);
- return nullptr;
+ // Collect garbage and retry.
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
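// A trace of the reservation loop above, with kAllocationRetries == 2:
// trial 0 fails -> GC -> trial 1 fails -> GC -> trial 2 fails -> sample
// kAddressSpaceLimitReachedFailure and return nullptr. A successful
// ReserveAddressSpace breaks out immediately, so at most two memory-pressure
// GCs are triggered per allocation attempt.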
// The Reserve makes the whole region inaccessible by default.
- *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
- PageAllocator::kNoAccess);
- if (*allocation_base == nullptr) {
- memory_tracker->ReleaseReservation(*allocation_length);
- memory_tracker->AddAllocationStatusSample(AllocationStatus::kOtherFailure);
- return nullptr;
+ DCHECK_NULL(*allocation_base);
+ for (int trial = 0;; ++trial) {
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base != nullptr) break;
+ if (trial == kAllocationRetries) {
+ memory_tracker->ReleaseReservation(*allocation_length);
+ AddAllocationStatusSample(heap->isolate(),
+ AllocationStatus::kOtherFailure);
+ return nullptr;
+ }
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
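// The mapping loop above follows the same retry discipline: the critical
// memory-pressure GC can free pages still held by dead JSArrayBuffers, so a
// second or third AllocatePages attempt may succeed where the first failed.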
byte* memory = reinterpret_cast<byte*>(*allocation_base);
if (require_full_guard_regions) {
@@ -91,11 +108,11 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
}
}
- memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
- memory, size);
- memory_tracker->AddAllocationStatusSample(
- did_retry ? AllocationStatus::kSuccessAfterRetry
- : AllocationStatus::kSuccess);
+ memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
+ *allocation_length, memory, size);
+ AddAllocationStatusSample(heap->isolate(),
+ did_retry ? AllocationStatus::kSuccessAfterRetry
+ : AllocationStatus::kSuccess);
return memory;
}
} // namespace
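// Hypothetical call-site sketch for TryAllocateBackingStore; the caller
// names and the fallback policy are assumed for illustration and are not
// part of this patch:
//
//   void* allocation_base = nullptr;
//   size_t allocation_length = 0;
//   void* memory = TryAllocateBackingStore(
//       memory_tracker, heap, byte_length,
//       /*require_full_guard_regions=*/true, &allocation_base,
//       &allocation_length);
//   if (memory == nullptr) {
//     // e.g. retry with require_full_guard_regions == false, or fail.
//   }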
@@ -118,17 +135,21 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
#elif V8_TARGET_ARCH_64_BIT
// We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
// once we fill everything up with full-sized guard regions.
- constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4GiB
+ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
- constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+ constexpr size_t kAddressSpaceLimit = 0x90000000; // 2 GiB + 256 MiB
#endif
- size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
- DCHECK_GE(old_count + num_bytes, old_count);
- if (old_count + num_bytes <= kAddressSpaceLimit) {
- return true;
- }
- reserved_address_space_ -= num_bytes;
+ int retries = 5; // cmpxchg can fail spuriously; retry a bounded number of times.
+ do {
+ size_t old_count = reserved_address_space_;
+ if ((kAddressSpaceLimit - old_count) < num_bytes) return false;
+ if (reserved_address_space_.compare_exchange_weak(old_count,
+ old_count + num_bytes)) {
+ return true;
+ }
+ } while (retries-- > 0);
+
return false;
}
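// Design note on the loop above: the replaced fetch_add/subtract scheme
// could transiently publish a count above kAddressSpaceLimit; the
// compare-exchange loop checks the headroom first and only installs the new
// count if no other thread raced in, so the limit is never overshot. The
// general pattern (a sketch with assumed names):
//
//   size_t old_count = counter.load();
//   while (limit - old_count >= num_bytes) {
//     if (counter.compare_exchange_weak(old_count, old_count + num_bytes))
//       return true;  // On failure old_count is refreshed; re-check.
//   }
//   return false;
//
// Note that the bounded retry count above means a heavily contended
// reservation can fail spuriously even when headroom remains.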
@@ -138,14 +159,15 @@ void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
DCHECK_LE(num_bytes, old_reserved);
}
-void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
+void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
+ void* allocation_base,
size_t allocation_length,
void* buffer_start,
size_t buffer_length) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
allocated_address_space_ += allocation_length;
- AddAddressSpaceSample();
+ AddAddressSpaceSample(isolate);
allocations_.emplace(buffer_start,
AllocationData{allocation_base, allocation_length,
@@ -153,12 +175,7 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
}
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
- const void* buffer_start) {
- return InternalReleaseAllocation(buffer_start);
-}
-
-WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
- const void* buffer_start) {
+ Isolate* isolate, const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
auto find_result = allocations_.find(buffer_start);
@@ -170,7 +187,10 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
DCHECK_LE(num_bytes, allocated_address_space_);
reserved_address_space_ -= num_bytes;
allocated_address_space_ -= num_bytes;
- AddAddressSpaceSample();
+ // ReleaseAllocation might be called with a null isolate if the embedder,
+ // rather than a specific isolate, is releasing the allocation. This happens
+ // when the allocation was shared between multiple isolates (threads).
+ if (isolate) AddAddressSpaceSample(isolate);
AllocationData allocation_data = find_result->second;
allocations_.erase(find_result);
@@ -209,28 +229,21 @@ bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
return start + kWasmMaxHeapOffset < limit;
}
-bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
+bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
+ const void* buffer_start) {
if (IsWasmMemory(buffer_start)) {
- const AllocationData allocation = ReleaseAllocation(buffer_start);
+ const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
return true;
}
return false;
}
-void WasmMemoryTracker::AddAllocationStatusSample(AllocationStatus status) {
- if (allocation_result_) {
- allocation_result_->AddSample(static_cast<int>(status));
- }
-}
-
-void WasmMemoryTracker::AddAddressSpaceSample() {
- if (address_space_usage_mb_) {
- // Report address space usage in MiB so the full range fits in an int on all
- // platforms.
- address_space_usage_mb_->AddSample(
- static_cast<int>(allocated_address_space_ >> 20));
- }
+void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
+ // Report address space usage in MiB so the full range fits in an int on all
+ // platforms.
+ isolate->counters()->wasm_address_space_usage_mb()->AddSample(
+ static_cast<int>(allocated_address_space_ >> 20));
}
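// Arithmetic behind the sample above: `>> 20` converts bytes to MiB, so an
// int sample covers up to 2^31 MiB (2 PiB) of address space, comfortably
// above the 1 TiB + 4 GiB reservation limit.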
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
@@ -238,11 +251,9 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
SharedFlag shared) {
Handle<JSArrayBuffer> buffer =
isolate->factory()->NewJSArrayBuffer(shared, TENURED);
- DCHECK_GE(kMaxInt, size);
- if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
constexpr bool is_wasm_memory = true;
- JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
- static_cast<int>(size), shared, is_wasm_memory);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
+ shared, is_wasm_memory);
buffer->set_is_neuterable(false);
buffer->set_is_growable(true);
return buffer;
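// Note on the simplification above: the DCHECK_GE(kMaxInt, size) and the
// static_cast<int>(size) could be dropped because JSArrayBuffer::Setup
// evidently now accepts the byte length as a size_t (the patch passes size
// through unchanged), so clamping to kMaxInt is no longer this helper's
// concern; NewArrayBuffer below still bounds the requested size.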
@@ -250,13 +261,10 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
SharedFlag shared) {
- // Check against kMaxInt, since the byte length is stored as int in the
- // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
- // line, and we don't want to fail a CHECK then.
- if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) {
- // TODO(titzer): lift restriction on maximum memory allocated here.
- return {};
- }
+ // Enforce engine-limited maximum allocation size.
+ if (size > kV8MaxWasmMemoryBytes) return {};
+ // Enforce flag-limited maximum allocation size.
+ if (size > (FLAG_wasm_max_mem_pages * uint64_t{kWasmPageSize})) return {};
WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
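// Why the uint64_t{} widening in the flag check above matters (illustrative
// numbers; kWasmPageSize is 64 KiB): if --wasm-max-mem-pages were raised to
// 65536 on a 32-bit target, 65536 * 65536 would wrap to 0 in 32-bit
// arithmetic and every allocation would be rejected; computed in uint64_t
// the product is the intended 4 GiB bound.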