Diffstat (limited to 'deps/v8/src/cancelable-task.cc')
-rw-r--r--  deps/v8/src/cancelable-task.cc  |  63
1 file changed, 20 insertions(+), 43 deletions(-)
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index d231bb799d..defbb44775 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -14,7 +14,6 @@ namespace internal {
Cancelable::Cancelable(CancelableTaskManager* parent)
: parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
id_ = parent->Register(this);
- CHECK(id_ != 0);
}
@@ -27,49 +26,35 @@ Cancelable::~Cancelable() {
}
}
-
-static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
-
-
-CancelableTaskManager::CancelableTaskManager()
- : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
-
+CancelableTaskManager::CancelableTaskManager() : task_id_counter_(0) {}
uint32_t CancelableTaskManager::Register(Cancelable* task) {
base::LockGuard<base::Mutex> guard(&mutex_);
uint32_t id = ++task_id_counter_;
// The loop below is just used when task_id_counter_ overflows.
- while ((id == 0) || (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id),
- id) != nullptr)) {
- ++id;
- }
- HashMap::Entry* entry =
- cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
- entry->value = task;
+ while (cancelable_tasks_.count(id) > 0) ++id;
+ cancelable_tasks_[id] = task;
return id;
}
void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
- void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ size_t removed = cancelable_tasks_.erase(id);
USE(removed);
- DCHECK(removed != nullptr);
+ DCHECK_NE(0, removed);
cancelable_tasks_barrier_.NotifyOne();
}
bool CancelableTaskManager::TryAbort(uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
- HashMap::Entry* entry =
- cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id);
- if (entry != nullptr) {
- Cancelable* value = reinterpret_cast<Cancelable*>(entry->value);
+ auto entry = cancelable_tasks_.find(id);
+ if (entry != cancelable_tasks_.end()) {
+ Cancelable* value = entry->second;
if (value->Cancel()) {
// Cannot call RemoveFinishedTask here because of recursive locking.
- void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
- USE(removed);
- DCHECK(removed != nullptr);
+ cancelable_tasks_.erase(entry);
cancelable_tasks_barrier_.NotifyOne();
return true;
}
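
The hunk above swaps V8's pointer-keyed HashMap for a standard integer-keyed container (the member itself is declared in cancelable-task.h, which this diff does not cover; judging by the calls it behaves like a std::map<uint32_t, Cancelable*>). Below is a minimal sketch of the resulting Register / RemoveFinishedTask / TryAbort pattern, using illustrative names and a stubbed task type rather than the real V8 declarations:

#include <cstdint>
#include <map>
#include <mutex>

struct Task {
  bool Cancel() { return true; }  // Stub; the real Cancelable::Cancel() fails
                                  // once the task has started running.
};

class TaskRegistry {
 public:
  uint32_t Register(Task* task) {
    std::lock_guard<std::mutex> guard(mutex_);
    uint32_t id = ++counter_;
    // Only matters once counter_ wraps around: probe forward until a free id
    // is found. An integer-keyed map can also hold id 0, which is presumably
    // why the CHECK(id_ != 0) in the Cancelable constructor could be dropped.
    while (tasks_.count(id) > 0) ++id;
    tasks_[id] = task;
    return id;
  }

  bool RemoveFinished(uint32_t id) {
    std::lock_guard<std::mutex> guard(mutex_);
    // erase(key) returns how many elements were removed (0 or 1), replacing
    // the old HashMap::Remove() null-pointer check.
    return tasks_.erase(id) == 1;
  }

  bool TryAbort(uint32_t id) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto entry = tasks_.find(id);
    if (entry == tasks_.end() || !entry->second->Cancel()) return false;
    // Erase inline instead of calling RemoveFinished(): the (non-recursive)
    // mutex is already held here.
    tasks_.erase(entry);
    return true;
  }

 private:
  std::mutex mutex_;
  uint32_t counter_ = 0;
  std::map<uint32_t, Task*> tasks_;
};

The real manager additionally notifies a condition variable after each removal so that CancelAndWait() can wake up; that interaction is sketched after the final hunk.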
@@ -85,27 +70,19 @@ void CancelableTaskManager::CancelAndWait() {
// started.
base::LockGuard<base::Mutex> guard(&mutex_);
- // HashMap does not support removing while iterating, hence keep a set of
- // entries that are to be removed.
- std::set<uint32_t> to_remove;
-
- // Cancelable tasks could potentially register new tasks, requiring a loop
- // here.
- while (cancelable_tasks_.occupancy() > 0) {
- for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
- p = cancelable_tasks_.Next(p)) {
- if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
- to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
+ // Cancelable tasks could be running or could potentially register new
+ // tasks, requiring a loop here.
+ while (!cancelable_tasks_.empty()) {
+ for (auto it = cancelable_tasks_.begin(); it != cancelable_tasks_.end();) {
+ auto current = it;
+ // We need to get to the next element before erasing the current.
+ ++it;
+ if (current->second->Cancel()) {
+ cancelable_tasks_.erase(current);
}
}
- // Remove tasks that were successfully canceled.
- for (auto id : to_remove) {
- cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
- }
- to_remove.clear();
-
- // Finally, wait for already running background tasks.
- if (cancelable_tasks_.occupancy() > 0) {
+ // Wait for already running background tasks.
+ if (!cancelable_tasks_.empty()) {
cancelable_tasks_barrier_.Wait(&mutex_);
}
}
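
The rewritten CancelAndWait() loop relies on two properties: erasing through a saved iterator invalidates only that iterator on a node-based map, and running tasks may still register new tasks, so the outer loop repeats until the container drains. A sketch of the same shape using standard synchronization primitives (std::condition_variable in place of V8's base::ConditionVariable, illustrative names throughout):

#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>

struct Task {
  bool Cancel() { return true; }  // Stub; returns false if already running.
};

class TaskRegistry {
 public:
  void CancelAndWait() {
    std::unique_lock<std::mutex> lock(mutex_);
    // Running tasks may still register new ones, so loop until the map is
    // completely drained.
    while (!tasks_.empty()) {
      for (auto it = tasks_.begin(); it != tasks_.end();) {
        auto current = it++;  // Advance first: erase() invalidates only the
                              // erased iterator on std::map.
        if (current->second->Cancel()) tasks_.erase(current);
      }
      // Anything left refused cancellation because it is already running;
      // wait until a finishing task signals its removal.
      if (!tasks_.empty()) task_removed_.wait(lock);
    }
  }

  // Called by a finishing task (compare RemoveFinishedTask above).
  void RemoveFinished(uint32_t id) {
    std::lock_guard<std::mutex> guard(mutex_);
    tasks_.erase(id);
    task_removed_.notify_one();
  }

 private:
  std::mutex mutex_;
  std::condition_variable task_removed_;
  std::map<uint32_t, Task*> tasks_;
};

Spurious wakeups from the wait are harmless here because the outer loop re-checks the container before deciding whether to wait again.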