Diffstat (limited to 'deps/v8/src/zone/accounting-allocator.cc')
 deps/v8/src/zone/accounting-allocator.cc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index 8ef141b4c1..37ebcf0dd4 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -43,7 +43,7 @@ void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
(size_t(1) << kMinSegmentSizePower);
size_t fits_fully = max_pool_size / full_size;
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
// We assume few zones (less than 'fits_fully' many) to be active at the same
// time. When zones grow regularly, they will keep requesting segments of
@@ -138,7 +138,7 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
Segment* segment;
{
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
segment = unused_segments_heads_[power];
@@ -173,7 +173,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
power -= kMinSegmentSizePower;
{
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
return false;
@@ -189,7 +189,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
}
void AccountingAllocator::ClearPool() {
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
power++) {
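
Note on the change itself: in V8, base::MutexGuard is an alias for base::LockGuard<base::Mutex> (declared in src/base/platform/mutex.h), so each hunk above is a mechanical respelling with no behavioral difference; the mutex is still acquired on construction of the guard and released when it leaves scope. Below is a minimal standalone sketch of that RAII locking pattern, written against the C++ standard library rather than V8's base library. SegmentPool, AddToBucket, and kMaxPerBucket are hypothetical stand-ins for AccountingAllocator's pool state, not V8 code.

// Minimal sketch of the RAII pattern the diff touches, using the C++
// standard library. SegmentPool is a hypothetical stand-in, not V8 code.
#include <array>
#include <cstddef>
#include <mutex>

class SegmentPool {
 public:
  // Mirrors AddSegmentToPool's early exit: refuse the segment when the
  // bucket for this size class is already full.
  bool AddToBucket(std::size_t power) {
    // std::lock_guard plays the role of base::MutexGuard here: the mutex
    // is locked on construction and unlocked when lock_guard is destroyed,
    // even if we return early.
    std::lock_guard<std::mutex> lock_guard(mutex_);
    if (sizes_[power] >= kMaxPerBucket) return false;
    ++sizes_[power];
    return true;
  }

 private:
  static constexpr std::size_t kMaxPerBucket = 16;
  std::mutex mutex_;                     // cf. unused_segments_mutex_
  std::array<std::size_t, 8> sizes_{};   // cf. unused_segments_sizes_
};

With C++17 class template argument deduction one could also write std::lock_guard lock_guard(mutex_); dropping the explicit template argument is roughly the ergonomic win the base::MutexGuard alias gives V8 at each of the four sites above.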