-rw-r--r--  deps/v8/src/cancelable-task.cc                          13
-rw-r--r--  deps/v8/src/cancelable-task.h                           18
-rw-r--r--  deps/v8/src/heap/item-parallel-job.h                     3
-rw-r--r--  deps/v8/src/heap/mark-compact.h                          2
-rw-r--r--  deps/v8/test/unittests/cancelable-tasks-unittest.cc      4
5 files changed, 21 insertions, 19 deletions
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index b0387f4dc0..9e48fe7593 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -29,18 +29,17 @@ Cancelable::~Cancelable() {
CancelableTaskManager::CancelableTaskManager()
: task_id_counter_(0), canceled_(false) {}
-uint32_t CancelableTaskManager::Register(Cancelable* task) {
+CancelableTaskManager::Id CancelableTaskManager::Register(Cancelable* task) {
base::LockGuard<base::Mutex> guard(&mutex_);
- uint32_t id = ++task_id_counter_;
- // The loop below is just used when task_id_counter_ overflows.
- while (cancelable_tasks_.count(id) > 0) ++id;
+ CancelableTaskManager::Id id = ++task_id_counter_;
+ // Id overflows are not supported.
+ CHECK_NE(0, id);
CHECK(!canceled_);
cancelable_tasks_[id] = task;
return id;
}
-
-void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
+void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {
base::LockGuard<base::Mutex> guard(&mutex_);
size_t removed = cancelable_tasks_.erase(id);
USE(removed);
@@ -49,7 +48,7 @@ void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
}
CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
- uint32_t id) {
+ CancelableTaskManager::Id id) {
base::LockGuard<base::Mutex> guard(&mutex_);
auto entry = cancelable_tasks_.find(id);
if (entry != cancelable_tasks_.end()) {
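With the task id widened from uint32_t to a 64-bit CancelableTaskManager::Id, the old wraparound loop in Register() is dropped in favor of a plain CHECK: a 64-bit counter cannot realistically overflow within a process lifetime. A minimal standalone sketch of that arithmetic, separate from the patch and assuming an illustrative rate of one billion registrations per second:

#include <cstdint>
#include <cstdio>

int main() {
  const double calls_per_second = 1e9;            // assumed rate, for illustration only
  const double max_id = 18446744073709551615.0;   // 2^64 - 1
  const double seconds_per_year = 365.25 * 24 * 3600;
  // Roughly 585 years at one billion Register() calls per second,
  // so the CHECK_NE(0, id) above can never realistically fire.
  std::printf("years to exhaust 64-bit ids: %.0f\n",
              max_id / calls_per_second / seconds_per_year);
  return 0;
}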
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index 5b1a5f1def..8a1ad325c8 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -5,7 +5,7 @@
#ifndef V8_CANCELABLE_TASK_H_
#define V8_CANCELABLE_TASK_H_
-#include <map>
+#include <unordered_map>
#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
@@ -24,12 +24,14 @@ class Isolate;
// from any fore- and background task/thread.
class V8_EXPORT_PRIVATE CancelableTaskManager {
public:
+ using Id = uint64_t;
+
CancelableTaskManager();
// Registers a new cancelable {task}. Returns the unique {id} of the task that
// can be used to try to abort a task by calling {Abort}.
// Must not be called after CancelAndWait.
- uint32_t Register(Cancelable* task);
+ Id Register(Cancelable* task);
// Try to abort running a task identified by {id}. The possible outcomes are:
// (1) The task is already finished running or was canceled before and
@@ -39,7 +41,7 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// removed.
//
enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
- TryAbortResult TryAbort(uint32_t id);
+ TryAbortResult TryAbort(Id id);
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
@@ -59,13 +61,13 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
private:
// Only called by {Cancelable} destructor. The task is done with executing,
// but needs to be removed.
- void RemoveFinishedTask(uint32_t id);
+ void RemoveFinishedTask(Id id);
// To mitigate the ABA problem, the api refers to tasks through an id.
- uint32_t task_id_counter_;
+ Id task_id_counter_;
// A set of cancelable tasks that are currently registered.
- std::map<uint32_t, Cancelable*> cancelable_tasks_;
+ std::unordered_map<Id, Cancelable*> cancelable_tasks_;
// Mutex and condition variable enabling concurrent register and removing, as
// well as waiting for background tasks on {CancelAndWait}.
@@ -89,7 +91,7 @@ class V8_EXPORT_PRIVATE Cancelable {
// a platform. This step transfers ownership to the platform, which destroys
// the task after running it. Since the exact time is not known, we cannot
// access the object after handing it to a platform.
- uint32_t id() { return id_; }
+ CancelableTaskManager::Id id() { return id_; }
protected:
bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
@@ -120,7 +122,7 @@ class V8_EXPORT_PRIVATE Cancelable {
CancelableTaskManager* parent_;
base::AtomicValue<Status> status_;
- uint32_t id_;
+ CancelableTaskManager::Id id_;
// The counter is incremented for failing tries to cancel a task. This can be
// used by the task itself as an indication how often external entities tried
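On the caller side the API shape is unchanged apart from the wider id type: a Cancelable registers itself with its manager on construction, id() now returns a CancelableTaskManager::Id, and TryAbort()/CancelAndWait() keep their semantics. The following is a hedged, non-compilable usage fragment; SomeCancelableTask is a hypothetical Cancelable subclass standing in for any real task, and ownership handling is elided:

// Hypothetical sketch only; SomeCancelableTask is not part of this patch.
CancelableTaskManager manager;
Cancelable* task = new SomeCancelableTask(&manager);  // registers itself with the manager
CancelableTaskManager::Id id = task->id();            // 64-bit id, never reused

switch (manager.TryAbort(id)) {
  case CancelableTaskManager::kTaskAborted:  // aborted before it ran
    break;
  case CancelableTaskManager::kTaskRunning:  // currently executing
    break;
  case CancelableTaskManager::kTaskRemoved:  // already finished or removed
    break;
}

manager.CancelAndWait();  // cancel the rest, wait for running tasks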
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index c3228403ff..4c2b37e9e6 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -133,7 +133,8 @@ class ItemParallelJob {
const size_t num_tasks = tasks_.size();
const size_t num_items = items_.size();
const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- uint32_t* task_ids = new uint32_t[num_tasks];
+ CancelableTaskManager::Id* task_ids =
+ new CancelableTaskManager::Id[num_tasks];
size_t start_index = 0;
Task* main_task = nullptr;
Task* task = nullptr;
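The surrounding ItemParallelJob code splits num_items across num_tasks with the integer ceiling-division idiom, and the per-task id array it fills is now typed as CancelableTaskManager::Id. A small standalone check of that idiom, with example values (10 items, 4 tasks) chosen purely for illustration:

#include <cassert>
#include <cstddef>

int main() {
  const size_t num_items = 10, num_tasks = 4;  // example values only
  // Same idiom as above: round up so that num_tasks * items_per_task >= num_items.
  const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
  assert(items_per_task == 3);  // ceil(10 / 4) == 3, so every item is assigned
  return 0;
}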
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 2babbc9296..e32ab4c6f1 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -471,7 +471,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Heap* const heap_;
int num_tasks_;
- uint32_t task_ids_[kMaxSweeperTasks];
+ CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
diff --git a/deps/v8/test/unittests/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
index eb5dd91589..d0462877f5 100644
--- a/deps/v8/test/unittests/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
@@ -180,7 +180,7 @@ TEST(CancelableTask, RemoveBeforeCancelAndWait) {
ResultType result1 = 0;
TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
ThreadedRunner runner1(task1);
- uint32_t id = task1->id();
+ CancelableTaskManager::Id id = task1->id();
EXPECT_EQ(id, 1u);
EXPECT_TRUE(manager.TryAbort(id));
runner1.Start();
@@ -195,7 +195,7 @@ TEST(CancelableTask, RemoveAfterCancelAndWait) {
ResultType result1 = 0;
TestTask* task1 = new TestTask(&manager, &result1);
ThreadedRunner runner1(task1);
- uint32_t id = task1->id();
+ CancelableTaskManager::Id id = task1->id();
EXPECT_EQ(id, 1u);
runner1.Start();
runner1.Join();