author     Matt Loring <mattloring@google.com>      2017-08-03 07:23:13 -0700
committer  Anna Henningsen <anna@addaleax.net>      2017-09-13 16:16:30 +0200
commit     e55b7f3e26931a3c62d787182bd48e052a9ddc41 (patch)
tree       9ccb677d7e67f473950df75807b9d16ae68c12ec /deps
parent     5976e0fac9af2370ad7b2b54b7c223c6f8b3d046 (diff)
deps: backport f9c4b7a from upstream V8
Original commit message:

    [heap] Move UnmapFreeMemoryTask to CancelableTask

    This mitigates the problem of blocking on the main thread when the
    platform is unable to execute background tasks in a timely manner.

    Bug: v8:6671
    Change-Id: I741d4b7594e8d62721dad32cbfb19551ffacd0c3
    Reviewed-on: https://chromium-review.googlesource.com/599528
    Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
    Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#47126}

PR-URL: https://github.com/nodejs/node/pull/14001
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Tobias Nießen <tniessen@tnie.de>
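The pattern this backport introduces can be summarized outside of V8: background work is scheduled as cancelable tasks whose ids are recorded, and at teardown the owner first tries to abort each recorded task, blocking on a semaphore only for tasks that have already started running. The sketch below is a minimal standalone model of that idea; the TaskManager, Semaphore, and Unmapper classes here are illustrative stand-ins chosen for this example, not V8's actual CancelableTaskManager or MemoryAllocator::Unmapper API.

// Minimal, self-contained sketch (assumed names, not V8's real API) of the
// abort-or-wait teardown pattern used by this patch.
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_set>

// Tiny counting semaphore (std::counting_semaphore is C++20-only).
class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

// Stand-in for a cancelable-task manager: tasks are registered before they
// are scheduled and can be aborted as long as they have not started running.
class TaskManager {
 public:
  enum AbortResult { kTaskAborted, kTaskRunningOrDone };

  uint64_t Register() {
    std::lock_guard<std::mutex> lock(mutex_);
    uint64_t id = next_id_++;
    pending_.insert(id);
    return id;
  }
  AbortResult TryAbort(uint64_t id) {
    std::lock_guard<std::mutex> lock(mutex_);
    return pending_.erase(id) != 0 ? kTaskAborted : kTaskRunningOrDone;
  }
  // A worker claims its id right before running; false means it was aborted.
  bool ClaimForRun(uint64_t id) {
    std::lock_guard<std::mutex> lock(mutex_);
    return pending_.erase(id) != 0;
  }

 private:
  std::mutex mutex_;
  uint64_t next_id_ = 1;
  std::unordered_set<uint64_t> pending_;
};

// Models the Unmapper's new bookkeeping: a capped number of background tasks,
// with their ids recorded so teardown can abort the ones that never started.
class Unmapper {
 public:
  explicit Unmapper(TaskManager* manager) : manager_(manager) {}

  void FreeQueuedChunks() {
    if (active_ >= kMaxTasks) return;  // cap the number of in-flight tasks
    uint64_t id = manager_->Register();
    task_ids_[active_++] = id;
    std::thread([this, id] {
      if (!manager_->ClaimForRun(id)) return;  // aborted before it started
      PerformFreeMemoryOnQueuedChunks();
      semaphore_.Signal();
    }).detach();
  }

  void WaitUntilCompleted() {
    for (int i = 0; i < active_; i++) {
      // Wait only for tasks that are already running; an aborted task never
      // signals the semaphore, so waiting for it would block forever.
      if (manager_->TryAbort(task_ids_[i]) != TaskManager::kTaskAborted) {
        semaphore_.Wait();
      }
    }
    active_ = 0;
  }

 private:
  static const int kMaxTasks = 24;

  void PerformFreeMemoryOnQueuedChunks() { /* real unmapping work goes here */ }

  TaskManager* const manager_;
  Semaphore semaphore_;
  uint64_t task_ids_[kMaxTasks];
  int active_ = 0;
};

The design point is that a task which was never run cannot signal the semaphore, so an unconditional wait (as in the removed bool WaitUntilCompleted()) can block the main thread whenever the platform is slow to run background tasks; aborting first makes teardown independent of platform scheduling. In the patch itself the same ordering is enforced by Heap::TearDown() setting use_tasks_ to false so that no new tasks are spawned, Isolate::Deinit() calling unmapper()->WaitUntilCompleted(), and Unmapper::TearDown() then checking that no tasks remain active.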
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/src/heap/heap.cc                 |  2
-rw-r--r--  deps/v8/src/heap/heap.h                  |  4
-rw-r--r--  deps/v8/src/heap/spaces.cc               | 40
-rw-r--r--  deps/v8/src/heap/spaces.h                | 13
-rw-r--r--  deps/v8/src/isolate.cc                   |  1
-rw-r--r--  deps/v8/test/cctest/heap/test-spaces.cc  |  1
6 files changed, 40 insertions(+), 21 deletions(-)
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 20b20024c3..6cc718840a 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -163,6 +163,7 @@ Heap::Heap()
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
+ use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
@@ -5850,6 +5851,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
+ use_tasks_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index b579c0288a..e90838e295 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -957,6 +957,8 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
+ bool use_tasks() const { return use_tasks_; }
+
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -2371,6 +2373,8 @@ class Heap {
bool fast_promotion_mode_;
+ bool use_tasks_;
+
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 6f4546c816..f275a4d518 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -302,7 +302,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
highest_ever_allocated_(reinterpret_cast<void*>(0)),
- unmapper_(this) {}
+ unmapper_(isolate->heap(), this) {}
bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
@@ -334,40 +334,46 @@ void MemoryAllocator::TearDown() {
code_range_ = nullptr;
}
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
public:
- explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+ explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
+ : CancelableTask(isolate), unmapper_(unmapper) {}
private:
- // v8::Task overrides.
- void Run() override {
+ void RunInternal() override {
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
- Unmapper* unmapper_;
+ Unmapper* const unmapper_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
ReconsiderDelayedChunks();
- if (FLAG_concurrent_sweeping) {
+ if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
+ if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
+ // kMaxUnmapperTasks are already running. Avoid creating any more.
+ return;
+ }
+ UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
+ DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
+ task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
- concurrent_unmapping_tasks_active_++;
+ task, v8::Platform::kShortRunningTask);
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
-bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
- bool waited = false;
- while (concurrent_unmapping_tasks_active_ > 0) {
- pending_unmapping_tasks_semaphore_.Wait();
- concurrent_unmapping_tasks_active_--;
- waited = true;
+void MemoryAllocator::Unmapper::WaitUntilCompleted() {
+ for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ }
+ concurrent_unmapping_tasks_active_ = 0;
}
- return waited;
}
template <MemoryAllocator::Unmapper::FreeMode mode>
@@ -394,7 +400,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- WaitUntilCompleted();
+ CHECK_EQ(0, concurrent_unmapping_tasks_active_);
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index a8394dd486..2ae089b401 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -16,6 +16,7 @@
#include "src/base/hashmap.h"
#include "src/base/iterator.h"
#include "src/base/platform/mutex.h"
+#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
@@ -1184,8 +1185,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
public:
class UnmapFreeMemoryTask;
- explicit Unmapper(MemoryAllocator* allocator)
- : allocator_(allocator),
+ Unmapper(Heap* heap, MemoryAllocator* allocator)
+ : heap_(heap),
+ allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
concurrent_unmapping_tasks_active_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
@@ -1219,13 +1221,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
void FreeQueuedChunks();
- bool WaitUntilCompleted();
+ void WaitUntilCompleted();
void TearDown();
bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
private:
static const int kReservedQueueingSlots = 64;
+ static const int kMaxUnmapperTasks = 24;
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
@@ -1264,13 +1267,15 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
+ Heap* const heap_;
+ MemoryAllocator* const allocator_;
base::Mutex mutex_;
- MemoryAllocator* allocator_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk.
std::list<MemoryChunk*> delayed_regular_chunks_;
+ CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 5b1e26e1d0..48f5b30bd2 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -2455,6 +2455,7 @@ void Isolate::Deinit() {
wasm_compilation_manager_->TearDown();
heap_.mark_compact_collector()->EnsureSweepingCompleted();
+ heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
DumpAndResetStats();
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 26b2fdd193..b99913ab80 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -370,6 +370,7 @@ TEST(NewSpace) {
}
new_space.TearDown();
+ memory_allocator->unmapper()->WaitUntilCompleted();
memory_allocator->TearDown();
delete memory_allocator;
}