Diffstat (limited to 'deps/v8/test/unittests/base')
-rw-r--r--  deps/v8/test/unittests/base/address-region-unittest.cc               66
-rw-r--r--  deps/v8/test/unittests/base/functional-unittest.cc                     2
-rw-r--r--  deps/v8/test/unittests/base/platform/condition-variable-unittest.cc    4
-rw-r--r--  deps/v8/test/unittests/base/platform/platform-unittest.cc              2
-rw-r--r--  deps/v8/test/unittests/base/region-allocator-unittest.cc             356
-rw-r--r--  deps/v8/test/unittests/base/threaded-list-unittest.cc                309
6 files changed, 735 insertions, 4 deletions
diff --git a/deps/v8/test/unittests/base/address-region-unittest.cc b/deps/v8/test/unittests/base/address-region-unittest.cc
new file mode 100644
index 0000000000..8dffc10247
--- /dev/null
+++ b/deps/v8/test/unittests/base/address-region-unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/address-region.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+using Address = AddressRegion::Address;
+
+TEST(AddressRegionTest, Contains) {
+ struct {
+ Address start;
+ size_t size;
+ } test_cases[] = {{153, 771}, {0, 227}, {static_cast<Address>(-447), 447}};
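+ // Note: Address is unsigned, so the last case describes a region at the
+ // very top of the address space whose exclusive end wraps around to 0.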
+
+ for (size_t i = 0; i < arraysize(test_cases); i++) {
+ Address start = test_cases[i].start;
+ size_t size = test_cases[i].size;
+ Address end = start + size; // exclusive
+
+ AddressRegion region(start, size);
+
+ // Test single-argument contains().
+ CHECK(!region.contains(start - 1041));
+ CHECK(!region.contains(start - 1));
+ CHECK(!region.contains(end));
+ CHECK(!region.contains(end + 1));
+ CHECK(!region.contains(end + 113));
+
+ CHECK(region.contains(start));
+ CHECK(region.contains(start + 1));
+ CHECK(region.contains(start + size / 2));
+ CHECK(region.contains(end - 1));
+
+ // Test two-arguments contains().
+ CHECK(!region.contains(start - 1, size));
+ CHECK(!region.contains(start, size + 1));
+ CHECK(!region.contains(start - 17, 17));
+ CHECK(!region.contains(start - 17, size * 2));
+ CHECK(!region.contains(end, 1));
+ CHECK(!region.contains(end, static_cast<size_t>(0 - end)));
+
+ CHECK(region.contains(start, size));
+ CHECK(region.contains(start, 10));
+ CHECK(region.contains(start + 11, 120));
+ CHECK(region.contains(end - 13, 13));
+
+ // Zero-size queries.
+ CHECK(!region.contains(start - 10, 0));
+ CHECK(!region.contains(start - 1, 0));
+ CHECK(!region.contains(end, 0));
+ CHECK(!region.contains(end + 10, 0));
+
+ CHECK(region.contains(start, 0));
+ CHECK(region.contains(start + 10, 0));
+ CHECK(region.contains(end - 1, 0));
+ }
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index b9295d49a0..207d5cbdd7 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -44,7 +44,7 @@ class FunctionalTest : public ::testing::Test {
public:
FunctionalTest()
: rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
- virtual ~FunctionalTest() {}
+ ~FunctionalTest() override = default;
RandomNumberGenerator* rng() { return &rng_; }
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
index 43fd335270..b32863f4b2 100644
--- a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -113,8 +113,8 @@ class ThreadWithSharedMutexAndConditionVariable final : public Thread {
: Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
running_(false),
finished_(false),
- cv_(NULL),
- mutex_(NULL) {}
+ cv_(nullptr),
+ mutex_(nullptr) {}
void Run() override {
LockGuard<Mutex> lock_guard(mutex_);
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index f9fc26a2df..d31d85447c 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -30,7 +30,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
keys_[i] = Thread::CreateThreadLocalKey();
}
}
- ~ThreadLocalStorageTest() {
+ ~ThreadLocalStorageTest() override {
for (size_t i = 0; i < arraysize(keys_); ++i) {
Thread::DeleteThreadLocalKey(keys_[i]);
}
diff --git a/deps/v8/test/unittests/base/region-allocator-unittest.cc b/deps/v8/test/unittests/base/region-allocator-unittest.cc
new file mode 100644
index 0000000000..5024ac85eb
--- /dev/null
+++ b/deps/v8/test/unittests/base/region-allocator-unittest.cc
@@ -0,0 +1,356 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/region-allocator.h"
+#include "test/unittests/test-utils.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+using Address = RegionAllocator::Address;
+using v8::internal::KB;
+using v8::internal::MB;
+
+class RegionAllocatorTest : public ::testing::TestWithParam<int> {};
+
+TEST_P(RegionAllocatorTest, SimpleAllocateRegionAt) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.free_size(), kEnd - address);
+ CHECK(ra.AllocateRegionAt(address, kPageSize));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free one region and then the allocation should succeed.
+ CHECK_EQ(ra.FreeRegion(kBegin), kPageSize);
+ CHECK_EQ(ra.free_size(), kPageSize);
+ CHECK(ra.AllocateRegionAt(kBegin, kPageSize));
+
+ // Free all the pages.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST_P(RegionAllocatorTest, SimpleAllocateRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ CHECK_EQ(ra.free_size(), kSize - kPageSize * i);
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free one page and ensure that we are able to allocate it again.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), address);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
+ const size_t kPageSize = 8 * KB;
+ const size_t kPageCountLog = 16;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(153 * MB);
+ const Address kEnd = kBegin + kSize;
+
+ base::RandomNumberGenerator rng(GetParam());
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ std::set<Address> allocated_pages;
+ // Page addresses must be randomized for at least this many of the
+ // initially allocated pages.
+ const size_t kRandomizationLimit = ra.max_load_for_randomization_ / kPageSize;
+ CHECK_LT(kRandomizationLimit, kPageCount);
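+ // (Assumption: the allocator randomizes placement only while its load is
+ // below max_load_for_randomization_ and falls back to a deterministic
+ // left-to-right search afterwards, which is what this test relies on.)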
+
+ Address last_address = kBegin;
+ bool saw_randomized_pages = false;
+
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(&rng, kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK(IsAligned(address, kPageSize));
+ CHECK_LE(kBegin, address);
+ CHECK_LT(address, kEnd);
+ CHECK_EQ(allocated_pages.find(address), allocated_pages.end());
+ allocated_pages.insert(address);
+
+ saw_randomized_pages |= (address < last_address);
+ last_address = address;
+
+ if (i == kRandomizationLimit) {
+ // We must have observed out-of-order (randomized) allocations by this
+ // point. The remaining allocations may still be randomized depending on
+ // the distribution of free ranges, but that is not guaranteed.
+ CHECK(saw_randomized_pages);
+ }
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+}
+
+TEST_P(RegionAllocatorTest, AllocateBigRegions) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog) - 1;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
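+ // Request sizes of 1, 2, 4, ... pages; the i-th request lands at page
+ // offset 2^i - 1 because the earlier requests occupy a total of
+ // 1 + 2 + ... + 2^(i-1) = 2^i - 1 pages.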
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * ((size_t{1} << i) - 1));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free one page and ensure that we are able to allocate it again.
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ const size_t size = kPageSize * (size_t{1} << i);
+ Address address = kBegin + kPageSize * ((size_t{1} << i) - 1);
+ CHECK_EQ(ra.FreeRegion(address), size);
+ CHECK_EQ(ra.AllocateRegion(size), address);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST_P(RegionAllocatorTest, MergeLeftToRightCoalescingRegions) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region using the following page size pattern:
+ // |0|1|22|3333|...
+ CHECK_EQ(ra.AllocateRegion(kPageSize), kBegin);
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * (size_t{1} << i));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free two coalescing regions and ensure the new page of bigger size
+ // can be allocated.
+ size_t current_size = kPageSize;
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ CHECK_EQ(ra.FreeRegion(kBegin), current_size);
+ CHECK_EQ(ra.FreeRegion(kBegin + current_size), current_size);
+ current_size += current_size;
+ CHECK_EQ(ra.AllocateRegion(current_size), kBegin);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST_P(RegionAllocatorTest, MergeRightToLeftCoalescingRegions) {
+ base::RandomNumberGenerator rng(GetParam());
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free pages with even indices left-to-right.
+ for (size_t i = 0; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * i;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ }
+
+ // Free pages with odd indices right-to-left.
+ for (size_t i = 1; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * (kPageCount - i);
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ // Now we should be able to allocate a double-sized page.
+ CHECK_EQ(ra.AllocateRegion(kPageSize * 2), address - kPageSize);
+ // ... but there is room for only one such double-sized page.
+ CHECK_EQ(ra.AllocateRegion(kPageSize * 2),
+ RegionAllocator::kAllocationFailure);
+ }
+
+ // Free all the double-sized pages.
+ for (size_t i = 0; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * i;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize * 2);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST_P(RegionAllocatorTest, Fragmentation) {
+ const size_t kPageSize = 64 * KB;
+ const size_t kPageCount = 9;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free pages in the following order, checking the freed size and the
+ // number of regions the allocator tracks after each step.
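+ // In the diagrams below, 'x' marks a freed page and '.' a page that is
+ // still allocated; expected_region_count is the total number of regions
+ // (free and allocated) after coalescing.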
+ struct {
+ size_t page_index_to_free;
+ size_t expected_region_count;
+ } testcase[] = { // .........
+ {0, 9}, // x........
+ {2, 9}, // x.x......
+ {4, 9}, // x.x.x....
+ {6, 9}, // x.x.x.x..
+ {8, 9}, // x.x.x.x.x
+ {1, 7}, // xxx.x.x.x
+ {7, 5}, // xxx.x.xxx
+ {3, 3}, // xxxxx.xxx
+ {5, 1}}; // xxxxxxxxx
+ CHECK_EQ(kPageCount, arraysize(testcase));
+
+ CHECK_EQ(ra.all_regions_.size(), kPageCount);
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = kBegin + kPageSize * testcase[i].page_index_to_free;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ CHECK_EQ(ra.all_regions_.size(), testcase[i].expected_region_count);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST_P(RegionAllocatorTest, FindRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.free_size(), kEnd - address);
+ CHECK(ra.AllocateRegionAt(address, kPageSize));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Out-of-region queries must return the end iterator.
+ CHECK_EQ(ra.FindRegion(kBegin - 1), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kBegin - kPageSize), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kBegin / 2), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd + kPageSize), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd * 2), ra.all_regions_.end());
+
+ for (Address address = kBegin; address < kEnd; address += kPageSize / 4) {
+ RegionAllocator::AllRegionsSet::iterator region_iter =
+ ra.FindRegion(address);
+ CHECK_NE(region_iter, ra.all_regions_.end());
+ RegionAllocator::Region* region = *region_iter;
+ Address region_start = RoundDown(address, kPageSize);
+ CHECK_EQ(region->begin(), region_start);
+ CHECK_LE(region->begin(), address);
+ CHECK_LT(address, region->end());
+ }
+}
+
+TEST_P(RegionAllocatorTest, TrimRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 64;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ Address address = kBegin + 13 * kPageSize;
+ size_t size = 37 * kPageSize;
+ size_t free_size = kSize - size;
+ CHECK(ra.AllocateRegionAt(address, size));
+
+ size_t trim_size = kPageSize;
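+ // Shrink the region from its end in geometrically growing chunks until it
+ // is gone; TrimRegion(address, new_size) is expected to return the number
+ // of bytes trimmed off.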
+ do {
+ CHECK_EQ(ra.CheckRegion(address), size);
+ CHECK_EQ(ra.free_size(), free_size);
+
+ trim_size = std::min(size, trim_size);
+ size -= trim_size;
+ free_size += trim_size;
+ CHECK_EQ(ra.TrimRegion(address, size), trim_size);
+ trim_size *= 2;
+ } while (size != 0);
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+// Instantiate the parameterized test suite; without this, the TEST_P tests
+// above would never run. The seed value is arbitrary.
+INSTANTIATE_TEST_CASE_P(RegionAllocatorTests, RegionAllocatorTest,
+ ::testing::Values(123));
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
new file mode 100644
index 0000000000..96a730370b
--- /dev/null
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -0,0 +1,309 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iterator>
+
+#include "src/v8.h"
+
+#include "src/base/threaded-list.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
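+// A test node that can be threaded onto two independent lists at once: one
+// list is linked through next_ (the default traits), the other through
+// other_next_ via OtherTraits.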
+struct ThreadedListTestNode {
+ ThreadedListTestNode() : next_(nullptr), other_next_(nullptr) {}
+
+ ThreadedListTestNode** next() { return &next_; }
+
+ ThreadedListTestNode* next_;
+
+ struct OtherTraits {
+ static ThreadedListTestNode** next(ThreadedListTestNode* t) {
+ return t->other_next();
+ }
+ };
+
+ ThreadedListTestNode** other_next() { return &other_next_; }
+
+ ThreadedListTestNode* other_next_;
+};
+
+struct ThreadedListTest : public ::testing::Test {
+ static const size_t INIT_NODES = 5;
+ ThreadedListTest() {}
+
+ void SetUp() override {
+ for (size_t i = 0; i < INIT_NODES; i++) {
+ nodes[i] = ThreadedListTestNode();
+ }
+
+ for (size_t i = 0; i < INIT_NODES; i++) {
+ list.Add(&nodes[i]);
+ normal_next_list.Add(&nodes[i]);
+ }
+
+ // Verify that the setup worked.
+ CHECK(list.Verify());
+ CHECK_EQ(list.LengthForTest(), INIT_NODES);
+ CHECK(normal_next_list.Verify());
+ CHECK_EQ(normal_next_list.LengthForTest(), INIT_NODES);
+
+ extra_test_node_0 = ThreadedListTestNode();
+ extra_test_node_1 = ThreadedListTestNode();
+ extra_test_node_2 = ThreadedListTestNode();
+
+ extra_test_list.Add(&extra_test_node_0);
+ extra_test_list.Add(&extra_test_node_1);
+ extra_test_list.Add(&extra_test_node_2);
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ CHECK(extra_test_list.Verify());
+
+ normal_extra_test_list.Add(&extra_test_node_0);
+ normal_extra_test_list.Add(&extra_test_node_1);
+ normal_extra_test_list.Add(&extra_test_node_2);
+ CHECK_EQ(normal_extra_test_list.LengthForTest(), 3);
+ CHECK(normal_extra_test_list.Verify());
+ }
+
+ void TearDown() override {
+ // Check if the normal list threaded through next is still untouched.
+ CHECK(normal_next_list.Verify());
+ CHECK_EQ(normal_next_list.LengthForTest(), INIT_NODES);
+ CHECK_EQ(normal_next_list.AtForTest(0), &nodes[0]);
+ CHECK_EQ(normal_next_list.AtForTest(4), &nodes[4]);
+ CHECK(normal_extra_test_list.Verify());
+ CHECK_EQ(normal_extra_test_list.LengthForTest(), 3);
+ CHECK_EQ(normal_extra_test_list.AtForTest(0), &extra_test_node_0);
+ CHECK_EQ(normal_extra_test_list.AtForTest(2), &extra_test_node_2);
+
+ list.Clear();
+ extra_test_list.Clear();
+ }
+
+ ThreadedListTestNode nodes[INIT_NODES];
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> list;
+ ThreadedList<ThreadedListTestNode> normal_next_list;
+
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ extra_test_list;
+ ThreadedList<ThreadedListTestNode> normal_extra_test_list;
+ ThreadedListTestNode extra_test_node_0;
+ ThreadedListTestNode extra_test_node_1;
+ ThreadedListTestNode extra_test_node_2;
+};
+
+TEST_F(ThreadedListTest, Add) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ ThreadedListTestNode new_node;
+ // Add to existing list
+ list.Add(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.AtForTest(5), &new_node);
+
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ new_node = ThreadedListTestNode();
+ // Add to empty list
+ list.Add(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 1);
+ CHECK_EQ(list.AtForTest(0), &new_node);
+}
+
+TEST_F(ThreadedListTest, AddFront) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ ThreadedListTestNode new_node;
+ // AddFront to existing list
+ list.AddFront(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.first(), &new_node);
+
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ new_node = ThreadedListTestNode();
+ // AddFront to empty list
+ list.AddFront(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 1);
+ CHECK_EQ(list.first(), &new_node);
+}
+
+TEST_F(ThreadedListTest, ReinitializeHead) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ CHECK_NE(extra_test_list.first(), list.first());
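+ // ReinitializeHead is expected to repoint this list at the head of the
+ // extra_test_list chain, so both lists share the same nodes afterwards.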
+ list.ReinitializeHead(&extra_test_node_0);
+ list.Verify();
+ CHECK_EQ(extra_test_list.first(), list.first());
+ CHECK_EQ(extra_test_list.end(), list.end());
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+}
+
+TEST_F(ThreadedListTest, DropHead) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ CHECK_EQ(extra_test_list.first(), &extra_test_node_0);
+ extra_test_list.DropHead();
+ extra_test_list.Verify();
+ CHECK_EQ(extra_test_list.first(), &extra_test_node_1);
+ CHECK_EQ(extra_test_list.LengthForTest(), 2);
+}
+
+TEST_F(ThreadedListTest, Append) {
+ auto initial_extra_list_end = extra_test_list.end();
+ CHECK_EQ(list.LengthForTest(), 5);
+ list.Append(std::move(extra_test_list));
+ list.Verify();
+ extra_test_list.Verify();
+ CHECK(extra_test_list.is_empty());
+ CHECK_EQ(list.LengthForTest(), 8);
+ CHECK_EQ(list.AtForTest(4), &nodes[4]);
+ CHECK_EQ(list.AtForTest(5), &extra_test_node_0);
+ CHECK_EQ(list.end(), initial_extra_list_end);
+}
+
+TEST_F(ThreadedListTest, Prepend) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ list.Prepend(std::move(extra_test_list));
+ list.Verify();
+ extra_test_list.Verify();
+ CHECK(extra_test_list.is_empty());
+ CHECK_EQ(list.LengthForTest(), 8);
+ CHECK_EQ(list.first(), &extra_test_node_0);
+ CHECK_EQ(list.AtForTest(2), &extra_test_node_2);
+ CHECK_EQ(list.AtForTest(3), &nodes[0]);
+}
+
+TEST_F(ThreadedListTest, Clear) {
+ CHECK_NE(list.LengthForTest(), 0);
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+ CHECK_NULL(list.first());
+}
+
+TEST_F(ThreadedListTest, MoveAssign) {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list;
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ m_list = std::move(extra_test_list);
+
+ m_list.Verify();
+ CHECK_EQ(m_list.first(), &extra_test_node_0);
+ CHECK_EQ(m_list.LengthForTest(), 3);
+
+ // move assign from empty list
+ extra_test_list.Clear();
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+ m_list = std::move(extra_test_list);
+ CHECK_EQ(m_list.LengthForTest(), 0);
+
+ m_list.Verify();
+ CHECK_NULL(m_list.first());
+}
+
+TEST_F(ThreadedListTest, MoveCtor) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list(
+ std::move(extra_test_list));
+
+ m_list.Verify();
+ CHECK_EQ(m_list.LengthForTest(), 3);
+ CHECK_EQ(m_list.first(), &extra_test_node_0);
+
+ // move construct from empty list
+ extra_test_list.Clear();
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list2(
+ std::move(extra_test_list));
+ CHECK_EQ(m_list2.LengthForTest(), 0);
+
+ m_list2.Verify();
+ CHECK_NULL(m_list2.first());
+}
+
+TEST_F(ThreadedListTest, Remove) {
+ CHECK_EQ(list.LengthForTest(), 5);
+
+ // Remove first
+ CHECK_EQ(list.first(), &nodes[0]);
+ list.Remove(&nodes[0]);
+ list.Verify();
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.LengthForTest(), 4);
+
+ // Remove middle
+ list.Remove(&nodes[2]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 3);
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.AtForTest(1), &nodes[3]);
+
+ // Remove last
+ list.Remove(&nodes[4]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 2);
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.AtForTest(1), &nodes[3]);
+
+ // Remove rest
+ list.Remove(&nodes[1]);
+ list.Remove(&nodes[3]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ // Remove not found
+ list.Remove(&nodes[4]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 0);
+}
+
+TEST_F(ThreadedListTest, Rewind) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
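+ // Rewind(iter) truncates the list so that iter becomes the new end(): the
+ // element at iter and everything after it are dropped.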
+ for (auto iter = extra_test_list.begin(); iter != extra_test_list.end();
+ ++iter) {
+ if (*iter == &extra_test_node_2) {
+ extra_test_list.Rewind(iter);
+ break;
+ }
+ }
+ CHECK_EQ(extra_test_list.LengthForTest(), 2);
+ auto iter = extra_test_list.begin();
+ CHECK_EQ(*iter, &extra_test_node_0);
+ std::advance(iter, 1);
+ CHECK_EQ(*iter, &extra_test_node_1);
+
+ extra_test_list.Rewind(extra_test_list.begin());
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+}
+
+TEST_F(ThreadedListTest, IterComp) {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> c_list =
+ std::move(extra_test_list);
+ bool found_first = false;
+ for (auto iter = c_list.begin(); iter != c_list.end(); ++iter) {
+ // This triggers the operator== on the iterator
+ if (iter == c_list.begin()) {
+ found_first = true;
+ }
+ }
+ CHECK(found_first);
+}
+
+TEST_F(ThreadedListTest, ConstIterComp) {
+ const ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ c_list = std::move(extra_test_list);
+ bool found_first = false;
+ for (auto iter = c_list.begin(); iter != c_list.end(); ++iter) {
+ // This triggers the operator== on the iterator
+ if (iter == c_list.begin()) {
+ found_first = true;
+ }
+ }
+ CHECK(found_first);
+}
+
+} // namespace base
+} // namespace v8