Diffstat (limited to 'deps/v8/src/heap/array-buffer-tracker.cc')
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc | 33
1 file changed, 23 insertions, 10 deletions
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index fdca6e8df2..b284a65f66 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -11,6 +11,11 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
+#define TRACE_BS(...) \
+ do { \
+ if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
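
The TRACE_BS macro added above uses the common do { ... } while (false) idiom: the flag-gated PrintF expands to a single statement, so the macro composes safely with unbraced if/else. A minimal standalone sketch of the same pattern, using a plain bool and std::printf as stand-ins for V8's FLAG_trace_backing_store and PrintF (those stand-ins are assumptions, not V8 code):

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the runtime tracing flag.
static bool g_trace_backing_store = true;

// do { ... } while (false) turns the body into one statement, so a call
// such as `if (cond) TRACE_BS("...");` still pairs correctly with `else`.
#define TRACE_BS(...)                                     \
  do {                                                    \
    if (g_trace_backing_store) std::printf(__VA_ARGS__);  \
  } while (false)

int main() {
  std::size_t length = 1024;
  if (length > 0)
    TRACE_BS("ABT:queue length=%zu\n", length);
  else
    std::printf("empty\n");
  return 0;
}
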
@@ -20,7 +25,7 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
- std::vector<JSArrayBuffer::Allocation> backing_stores_to_free;
+ std::vector<std::shared_ptr<BackingStore>> backing_stores_to_free;
TrackingData kept_array_buffers;
JSArrayBuffer new_buffer;
@@ -32,8 +37,9 @@ void LocalArrayBufferTracker::Process(Callback callback) {
DCHECK_EQ(page_, Page::FromHeapObject(old_buffer));
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
- kept_array_buffers.insert(*it);
+ kept_array_buffers.insert(std::move(*it));
} else if (result == kUpdateEntry) {
+ DCHECK_EQ(old_buffer.byte_length(), new_buffer.byte_length());
DCHECK(!new_buffer.is_null());
Page* target_page = Page::FromHeapObject(new_buffer);
{
@@ -44,22 +50,28 @@ void LocalArrayBufferTracker::Process(Callback callback) {
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- const size_t length = it->second.length;
+ const size_t length = PerIsolateAccountingLength(old_buffer);
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
- DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory);
- tracker->AddInternal(new_buffer, length);
+ tracker->AddInternal(new_buffer, std::move(it->second));
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer,
static_cast<MemoryChunk*>(page_),
static_cast<MemoryChunk*>(target_page), length);
}
} else if (result == kRemoveEntry) {
- freed_memory += it->second.length;
- // We pass backing_store() and stored length to the collector for freeing
- // the backing store. Wasm allocations will go through their own tracker
- // based on the backing store.
- backing_stores_to_free.push_back(it->second);
+ freed_memory += PerIsolateAccountingLength(old_buffer);
+ auto backing_store = std::move(it->second);
+ TRACE_BS("ABT:queue bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ backing_store.get(), backing_store->buffer_start(),
+ backing_store->byte_length(), backing_store.use_count());
+ if (!backing_store->is_shared()) {
+ // Only retain non-shared backing stores. For shared backing stores,
+ // drop the shared_ptr right away; this should be cheap, since it only
+ // updates a refcount, except for the last reference, which destructs
+ // the store, and that case is rare.
+ backing_stores_to_free.push_back(backing_store);
+ }
} else {
UNREACHABLE();
}
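
The ordering note in the hunk above ("decrement before adding") matters because the external memory counters are unsigned: subtracting the moved bytes from the source side first keeps every intermediate value at or below the previous total, whereas crediting the destination first can transiently push a counter past its maximum and wrap around. A toy illustration of the decrement-first ordering, using plain uint32_t counters as a stand-in for the real per-chunk and heap-wide accounting (the names and counter layout here are hypothetical, not V8 APIs):

#include <cstdint>
#include <cstdio>

int main() {
  std::uint32_t length = 4096;
  std::uint32_t source = 4096, target = 0;
  std::uint32_t total = UINT32_MAX;  // heap-wide total already at the limit

  // Decrement-before-add: the total transiently drops by `length` and then
  // comes back, so no intermediate value exceeds the previous total.
  total -= length; source -= length;   // bytes leave the source page
  target += length; total += length;   // and are credited to the target page
  std::printf("source=%u target=%u total=%u\n", source, target, total);

  // The opposite order would evaluate total + length first, wrapping an
  // unsigned counter that is already at its maximum.
  return 0;
}
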
@@ -147,3 +159,4 @@ void ArrayBufferTracker::TearDown(Heap* heap) {
} // namespace internal
} // namespace v8
+#undef TRACE_BS
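
For the kRemoveEntry path, the change relies on ordinary shared_ptr semantics: std::move(it->second) transfers ownership of the BackingStore out of the tracking map without touching the reference count, non-shared stores are queued in backing_stores_to_free for later freeing, and a shared store's pointer is simply allowed to go out of scope, which only decrements the count unless it happens to be the last reference. A small self-contained sketch of that flow, with a hypothetical FakeBackingStore standing in for v8::internal::BackingStore:

#include <cstdio>
#include <memory>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for v8::internal::BackingStore.
struct FakeBackingStore {
  bool shared;
  bool is_shared() const { return shared; }
};

int main() {
  std::unordered_map<int, std::shared_ptr<FakeBackingStore>> tracked;
  tracked[0] = std::make_shared<FakeBackingStore>(FakeBackingStore{false});
  tracked[1] = std::make_shared<FakeBackingStore>(FakeBackingStore{true});

  std::vector<std::shared_ptr<FakeBackingStore>> backing_stores_to_free;
  for (auto it = tracked.begin(); it != tracked.end();) {
    // Moving out of the map transfers ownership without refcount traffic.
    auto backing_store = std::move(it->second);
    if (!backing_store->is_shared()) {
      // Non-shared stores are kept alive until they can be freed later.
      backing_stores_to_free.push_back(backing_store);
    }
    // A shared store's pointer goes out of scope here: that only drops a
    // refcount, and destructs the store only if this was the last reference.
    it = tracked.erase(it);
  }
  std::printf("queued %zu non-shared store(s) for freeing\n",
              backing_stores_to_free.size());
  return 0;
}
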