summaryrefslogtreecommitdiff
path: root/deps/v8/test/unittests
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/test/unittests')
-rw-r--r--deps/v8/test/unittests/BUILD.gn6
-rw-r--r--deps/v8/test/unittests/allocation-unittest.cc41
-rw-r--r--deps/v8/test/unittests/api/interceptor-unittest.cc2
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc69
-rw-r--r--deps/v8/test/unittests/asmjs/asm-types-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/address-region-unittest.cc66
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/platform/condition-variable-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/region-allocator-unittest.cc356
-rw-r--r--deps/v8/test/unittests/base/threaded-list-unittest.cc309
-rw-r--r--deps/v8/test/unittests/cancelable-tasks-unittest.cc2
-rw-r--r--deps/v8/test/unittests/code-stub-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc590
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc227
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/code-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc19
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/instruction-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc19
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc50
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc76
-rw-r--r--deps/v8/test/unittests/compiler/node-cache-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/node-matchers-unittest.cc468
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc51
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h19
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc1170
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/counters-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/bitmap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc19
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/heap-controller-unittest.cc60
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc12
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc16
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc11
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/libplatform/default-platform-unittest.cc10
-rw-r--r--deps/v8/test/unittests/libplatform/worker-thread-unittest.cc3
-rw-r--r--deps/v8/test/unittests/object-unittest.cc4
-rw-r--r--deps/v8/test/unittests/objects/microtask-queue-unittest.cc55
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc2
-rw-r--r--deps/v8/test/unittests/register-configuration-unittest.cc4
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc6
-rw-r--r--deps/v8/test/unittests/source-position-table-unittest.cc4
-rw-r--r--deps/v8/test/unittests/test-helpers.cc32
-rw-r--r--deps/v8/test/unittests/test-helpers.h2
-rw-r--r--deps/v8/test/unittests/test-utils.cc41
-rw-r--r--deps/v8/test/unittests/test-utils.h24
-rw-r--r--deps/v8/test/unittests/unittests.status5
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc20
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc87
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc153
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-unittest.cc4
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc139
-rw-r--r--deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc2
84 files changed, 3423 insertions, 994 deletions
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 606fe9c343..f63e2af197 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -55,6 +55,7 @@ v8_source_set("unittests_sources") {
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
"asmjs/switch-logic-unittest.cc",
+ "base/address-region-unittest.cc",
"base/atomic-utils-unittest.cc",
"base/bits-unittest.cc",
"base/cpu-unittest.cc",
@@ -72,8 +73,10 @@ v8_source_set("unittests_sources") {
"base/platform/platform-unittest.cc",
"base/platform/semaphore-unittest.cc",
"base/platform/time-unittest.cc",
+ "base/region-allocator-unittest.cc",
"base/sys-info-unittest.cc",
"base/template-utils-unittest.cc",
+ "base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
"bigint-unittest.cc",
"cancelable-tasks-unittest.cc",
@@ -112,6 +115,7 @@ v8_source_set("unittests_sources") {
"compiler/js-call-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
"compiler/js-intrinsic-lowering-unittest.cc",
+ "compiler/js-native-context-specialization-unittest.cc",
"compiler/js-operator-unittest.cc",
"compiler/js-typed-lowering-unittest.cc",
"compiler/linkage-tail-call-unittest.cc",
@@ -128,6 +132,7 @@ v8_source_set("unittests_sources") {
"compiler/node-unittest.cc",
"compiler/opcodes-unittest.cc",
"compiler/persistent-unittest.cc",
+ "compiler/redundancy-elimination-unittest.cc",
"compiler/regalloc/live-range-unittest.cc",
"compiler/regalloc/move-optimizer-unittest.cc",
"compiler/regalloc/register-allocator-unittest.cc",
@@ -182,6 +187,7 @@ v8_source_set("unittests_sources") {
"libplatform/worker-thread-unittest.cc",
"locked-queue-unittest.cc",
"object-unittest.cc",
+ "objects/microtask-queue-unittest.cc",
"parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
"register-configuration-unittest.cc",
diff --git a/deps/v8/test/unittests/allocation-unittest.cc b/deps/v8/test/unittests/allocation-unittest.cc
index 3e43cdd4ea..7b543ece24 100644
--- a/deps/v8/test/unittests/allocation-unittest.cc
+++ b/deps/v8/test/unittests/allocation-unittest.cc
@@ -40,7 +40,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
#endif
protected:
- virtual void SetUp() {
+ void SetUp() override {
struct sigaction action;
action.sa_sigaction = SignalHandler;
sigemptyset(&action.sa_mask);
@@ -51,7 +51,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
#endif
}
- virtual void TearDown() {
+ void TearDown() override {
// Be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_action_, nullptr);
#if V8_OS_MACOSX
@@ -95,12 +95,14 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TestPermissions(PageAllocator::Permission permission, bool can_read,
bool can_write) {
- const size_t page_size = AllocatePageSize();
- int* buffer = static_cast<int*>(
- AllocatePages(nullptr, page_size, page_size, permission));
+ v8::PageAllocator* page_allocator =
+ v8::internal::GetPlatformPageAllocator();
+ const size_t page_size = page_allocator->AllocatePageSize();
+ int* buffer = static_cast<int*>(AllocatePages(
+ page_allocator, nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
- CHECK(FreePages(buffer, page_size));
+ CHECK(FreePages(page_allocator, buffer, page_size));
}
};
@@ -125,41 +127,46 @@ TEST(AllocationTest, AllocateAndFree) {
size_t page_size = v8::internal::AllocatePageSize();
CHECK_NE(0, page_size);
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
+
// A large allocation, aligned at native allocation granularity.
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
- v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
- PageAllocator::Permission::kReadWrite);
+ page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
+ page_size, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(mem_addr);
- CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
+ CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * v8::internal::MB;
void* aligned_mem_addr = v8::internal::AllocatePages(
- AlignedAddress(v8::internal::GetRandomMmapAddr(), kBigAlignment),
+ page_allocator,
+ AlignedAddress(page_allocator->GetRandomMmapAddr(), kBigAlignment),
kAllocationSize, kBigAlignment, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
- CHECK(v8::internal::FreePages(aligned_mem_addr, kAllocationSize));
+ CHECK(v8::internal::FreePages(page_allocator, aligned_mem_addr,
+ kAllocationSize));
}
TEST(AllocationTest, ReserveMemory) {
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
size_t page_size = v8::internal::AllocatePageSize();
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
- v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
- PageAllocator::Permission::kReadWrite);
+ page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
+ page_size, PageAllocator::Permission::kReadWrite);
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
- size_t commit_size = v8::internal::CommitPageSize();
- CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
+ size_t commit_size = page_allocator->CommitPageSize();
+ CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kReadWrite));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[v8::internal::KB - 1] = 2;
- CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
+ CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kNoAccess));
- CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
+ CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/api/interceptor-unittest.cc b/deps/v8/test/unittests/api/interceptor-unittest.cc
index b13384f18a..8a1db3f823 100644
--- a/deps/v8/test/unittests/api/interceptor-unittest.cc
+++ b/deps/v8/test/unittests/api/interceptor-unittest.cc
@@ -35,7 +35,7 @@ namespace {
class InterceptorLoggingTest : public TestWithNativeContext {
public:
- InterceptorLoggingTest() {}
+ InterceptorLoggingTest() = default;
static const int kTestIndex = 0;
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 377ad83187..8ddf8a29c8 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -70,4 +70,73 @@ TEST_F(IsolateTest, MemoryPressureNotificationBackground) {
v8::platform::PumpMessageLoop(internal::V8::GetCurrentPlatform(), isolate());
}
+using IncumbentContextTest = TestWithIsolate;
+
+// Check that Isolate::GetIncumbentContext() returns the correct one in basic
+// scenarios.
+#if !defined(V8_USE_ADDRESS_SANITIZER)
+TEST_F(IncumbentContextTest, MAYBE_Basic) {
+ auto Str = [&](const char* s) {
+ return String::NewFromUtf8(isolate(), s, NewStringType::kNormal)
+ .ToLocalChecked();
+ };
+ auto Run = [&](Local<Context> context, const char* script) {
+ Context::Scope scope(context);
+ return Script::Compile(context, Str(script))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ };
+
+ // Set up the test environment; three contexts with getIncumbentGlobal()
+ // function.
+ Local<FunctionTemplate> get_incumbent_global = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<Value>& info) {
+ Local<Context> incumbent_context =
+ info.GetIsolate()->GetIncumbentContext();
+ info.GetReturnValue().Set(incumbent_context->Global());
+ });
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate());
+ global_template->Set(Str("getIncumbentGlobal"), get_incumbent_global);
+
+ Local<Context> context_a = Context::New(isolate(), nullptr, global_template);
+ Local<Context> context_b = Context::New(isolate(), nullptr, global_template);
+ Local<Context> context_c = Context::New(isolate(), nullptr, global_template);
+ Local<Object> global_a = context_a->Global();
+ Local<Object> global_b = context_b->Global();
+ Local<Object> global_c = context_c->Global();
+
+ Local<String> security_token = Str("security_token");
+ context_a->SetSecurityToken(security_token);
+ context_b->SetSecurityToken(security_token);
+ context_c->SetSecurityToken(security_token);
+
+ global_a->Set(context_a, Str("b"), global_b).ToChecked();
+ global_b->Set(context_b, Str("c"), global_c).ToChecked();
+
+ // Test scenario 2: A -> B -> C, then the incumbent is C.
+ Run(context_a, "funcA = function() { return b.funcB(); }");
+ Run(context_b, "funcB = function() { return c.getIncumbentGlobal(); }");
+ // Without BackupIncumbentScope.
+ EXPECT_EQ(global_b, Run(context_a, "funcA()"));
+ {
+ // With BackupIncumbentScope.
+ Context::BackupIncumbentScope backup_incumbent(context_a);
+ EXPECT_EQ(global_b, Run(context_a, "funcA()"));
+ }
+
+ // Test scenario 2: A -> B -> C -> C, then the incumbent is C.
+ Run(context_a, "funcA = function() { return b.funcB(); }");
+ Run(context_b, "funcB = function() { return c.funcC(); }");
+ Run(context_c, "funcC = function() { return getIncumbentGlobal(); }");
+ // Without BackupIncumbentScope.
+ EXPECT_EQ(global_c, Run(context_a, "funcA()"));
+ {
+ // With BackupIncumbentScope.
+ Context::BackupIncumbentScope backup_incumbent(context_a);
+ EXPECT_EQ(global_c, Run(context_a, "funcA()"));
+ }
+}
+#endif // !defined(V8_USE_ADDRESS_SANITIZER)
+
} // namespace v8
diff --git a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
index f17528977c..db5ed2ba52 100644
--- a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
@@ -203,7 +203,7 @@ TEST_F(AsmTypeTest, SaneParentsMap) {
<< Type::CamelName()->Name() << ", parents " \
<< reinterpret_cast<void*>(parents) << ", type " \
<< static_cast<void*>(Type::CamelName()); \
- } while (0);
+ } while (false);
FOR_EACH_ASM_VALUE_TYPE_LIST(V)
#undef V
}
@@ -212,7 +212,7 @@ TEST_F(AsmTypeTest, Names) {
#define V(CamelName, string_name, number, parent_types) \
do { \
EXPECT_THAT(Type::CamelName()->Name(), StrEq(string_name)); \
- } while (0);
+ } while (false);
FOR_EACH_ASM_VALUE_TYPE_LIST(V)
#undef V
diff --git a/deps/v8/test/unittests/base/address-region-unittest.cc b/deps/v8/test/unittests/base/address-region-unittest.cc
new file mode 100644
index 0000000000..8dffc10247
--- /dev/null
+++ b/deps/v8/test/unittests/base/address-region-unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/address-region.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+using Address = AddressRegion::Address;
+
+TEST(AddressRegionTest, Contains) {
+ struct {
+ Address start;
+ size_t size;
+ } test_cases[] = {{153, 771}, {0, 227}, {-447, 447}};
+
+ for (size_t i = 0; i < arraysize(test_cases); i++) {
+ Address start = test_cases[i].start;
+ size_t size = test_cases[i].size;
+ Address end = start + size; // exclusive
+
+ AddressRegion region(start, size);
+
+ // Test single-argument contains().
+ CHECK(!region.contains(start - 1041));
+ CHECK(!region.contains(start - 1));
+ CHECK(!region.contains(end));
+ CHECK(!region.contains(end + 1));
+ CHECK(!region.contains(end + 113));
+
+ CHECK(region.contains(start));
+ CHECK(region.contains(start + 1));
+ CHECK(region.contains(start + size / 2));
+ CHECK(region.contains(end - 1));
+
+ // Test two-arguments contains().
+ CHECK(!region.contains(start - 1, size));
+ CHECK(!region.contains(start, size + 1));
+ CHECK(!region.contains(start - 17, 17));
+ CHECK(!region.contains(start - 17, size * 2));
+ CHECK(!region.contains(end, 1));
+ CHECK(!region.contains(end, static_cast<size_t>(0 - end)));
+
+ CHECK(region.contains(start, size));
+ CHECK(region.contains(start, 10));
+ CHECK(region.contains(start + 11, 120));
+ CHECK(region.contains(end - 13, 13));
+ CHECK(!region.contains(end, 0));
+
+ // Zero-size queries.
+ CHECK(!region.contains(start - 10, 0));
+ CHECK(!region.contains(start - 1, 0));
+ CHECK(!region.contains(end, 0));
+ CHECK(!region.contains(end + 10, 0));
+
+ CHECK(region.contains(start, 0));
+ CHECK(region.contains(start + 10, 0));
+ CHECK(region.contains(end - 1, 0));
+ }
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index b9295d49a0..207d5cbdd7 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -44,7 +44,7 @@ class FunctionalTest : public ::testing::Test {
public:
FunctionalTest()
: rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
- virtual ~FunctionalTest() {}
+ ~FunctionalTest() override = default;
RandomNumberGenerator* rng() { return &rng_; }
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
index 43fd335270..b32863f4b2 100644
--- a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -113,8 +113,8 @@ class ThreadWithSharedMutexAndConditionVariable final : public Thread {
: Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
running_(false),
finished_(false),
- cv_(NULL),
- mutex_(NULL) {}
+ cv_(nullptr),
+ mutex_(nullptr) {}
void Run() override {
LockGuard<Mutex> lock_guard(mutex_);
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index f9fc26a2df..d31d85447c 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -30,7 +30,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
keys_[i] = Thread::CreateThreadLocalKey();
}
}
- ~ThreadLocalStorageTest() {
+ ~ThreadLocalStorageTest() override {
for (size_t i = 0; i < arraysize(keys_); ++i) {
Thread::DeleteThreadLocalKey(keys_[i]);
}
diff --git a/deps/v8/test/unittests/base/region-allocator-unittest.cc b/deps/v8/test/unittests/base/region-allocator-unittest.cc
new file mode 100644
index 0000000000..5024ac85eb
--- /dev/null
+++ b/deps/v8/test/unittests/base/region-allocator-unittest.cc
@@ -0,0 +1,356 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/region-allocator.h"
+#include "test/unittests/test-utils.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+using Address = RegionAllocator::Address;
+using v8::internal::KB;
+using v8::internal::MB;
+
+class RegionAllocatorTest : public ::testing::TestWithParam<int> {};
+
+TEST(RegionAllocatorTest, SimpleAllocateRegionAt) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.free_size(), kEnd - address);
+ CHECK(ra.AllocateRegionAt(address, kPageSize));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free one region and then the allocation should succeed.
+ CHECK_EQ(ra.FreeRegion(kBegin), kPageSize);
+ CHECK_EQ(ra.free_size(), kPageSize);
+ CHECK(ra.AllocateRegionAt(kBegin, kPageSize));
+
+ // Free all the pages.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST(RegionAllocatorTest, SimpleAllocateRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ CHECK_EQ(ra.free_size(), kSize - kPageSize * i);
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free one page and ensure that we are able to allocate it again.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), address);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
+ const size_t kPageSize = 8 * KB;
+ const size_t kPageCountLog = 16;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(153 * MB);
+ const Address kEnd = kBegin + kSize;
+
+ base::RandomNumberGenerator rng(GetParam());
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ std::set<Address> allocated_pages;
+ // The page addresses must be randomized this number of allocated pages.
+ const size_t kRandomizationLimit = ra.max_load_for_randomization_ / kPageSize;
+ CHECK_LT(kRandomizationLimit, kPageCount);
+
+ Address last_address = kBegin;
+ bool saw_randomized_pages = false;
+
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(&rng, kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK(IsAligned(address, kPageSize));
+ CHECK_LE(kBegin, address);
+ CHECK_LT(address, kEnd);
+ CHECK_EQ(allocated_pages.find(address), allocated_pages.end());
+ allocated_pages.insert(address);
+
+ saw_randomized_pages |= (address < last_address);
+ last_address = address;
+
+ if (i == kRandomizationLimit) {
+ // We must evidence allocation randomization till this point.
+ // The rest of the allocations may still be randomized depending on
+ // the free ranges distribution, however it is not guaranteed.
+ CHECK(saw_randomized_pages);
+ }
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+}
+
+TEST(RegionAllocatorTest, AllocateBigRegions) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog) - 1;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * ((size_t{1} << i) - 1));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free one page and ensure that we are able to allocate it again.
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ const size_t size = kPageSize * (size_t{1} << i);
+ Address address = kBegin + kPageSize * ((size_t{1} << i) - 1);
+ CHECK_EQ(ra.FreeRegion(address), size);
+ CHECK_EQ(ra.AllocateRegion(size), address);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST(RegionAllocatorTest, MergeLeftToRightCoalecsingRegions) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region using the following page size pattern:
+ // |0|1|22|3333|...
+ CHECK_EQ(ra.AllocateRegion(kPageSize), kBegin);
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * (size_t{1} << i));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free two coalescing regions and ensure the new page of bigger size
+ // can be allocated.
+ size_t current_size = kPageSize;
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ CHECK_EQ(ra.FreeRegion(kBegin), current_size);
+ CHECK_EQ(ra.FreeRegion(kBegin + current_size), current_size);
+ current_size += current_size;
+ CHECK_EQ(ra.AllocateRegion(current_size), kBegin);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST_P(RegionAllocatorTest, MergeRightToLeftCoalecsingRegions) {
+ base::RandomNumberGenerator rng(GetParam());
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free pages with even indices left-to-right.
+ for (size_t i = 0; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * i;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ }
+
+ // Free pages with odd indices right-to-left.
+ for (size_t i = 1; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * (kPageCount - i);
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ // Now we should be able to allocate a double-sized page.
+ CHECK_EQ(ra.AllocateRegion(kPageSize * 2), address - kPageSize);
+ // .. but there's a window for only one such page.
+ CHECK_EQ(ra.AllocateRegion(kPageSize * 2),
+ RegionAllocator::kAllocationFailure);
+ }
+
+ // Free all the double-sized pages.
+ for (size_t i = 0; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * i;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize * 2);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST(RegionAllocatorTest, Fragmentation) {
+ const size_t kPageSize = 64 * KB;
+ const size_t kPageCount = 9;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free pages in the following order and check the freed size.
+ struct {
+ size_t page_index_to_free;
+ size_t expected_page_count;
+ } testcase[] = { // .........
+ {0, 9}, // x........
+ {2, 9}, // x.x......
+ {4, 9}, // x.x.x....
+ {6, 9}, // x.x.x.x..
+ {8, 9}, // x.x.x.x.x
+ {1, 7}, // xxx.x.x.x
+ {7, 5}, // xxx.x.xxx
+ {3, 3}, // xxxxx.xxx
+ {5, 1}}; // xxxxxxxxx
+ CHECK_EQ(kPageCount, arraysize(testcase));
+
+ CHECK_EQ(ra.all_regions_.size(), kPageCount);
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = kBegin + kPageSize * testcase[i].page_index_to_free;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ CHECK_EQ(ra.all_regions_.size(), testcase[i].expected_page_count);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST(RegionAllocatorTest, FindRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.free_size(), kEnd - address);
+ CHECK(ra.AllocateRegionAt(address, kPageSize));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // The out-of region requests must return end iterator.
+ CHECK_EQ(ra.FindRegion(kBegin - 1), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kBegin - kPageSize), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kBegin / 2), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd + kPageSize), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd * 2), ra.all_regions_.end());
+
+ for (Address address = kBegin; address < kEnd; address += kPageSize / 4) {
+ RegionAllocator::AllRegionsSet::iterator region_iter =
+ ra.FindRegion(address);
+ CHECK_NE(region_iter, ra.all_regions_.end());
+ RegionAllocator::Region* region = *region_iter;
+ Address region_start = RoundDown(address, kPageSize);
+ CHECK_EQ(region->begin(), region_start);
+ CHECK_LE(region->begin(), address);
+ CHECK_LT(address, region->end());
+ }
+}
+
+TEST(RegionAllocatorTest, TrimRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 64;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ Address address = kBegin + 13 * kPageSize;
+ size_t size = 37 * kPageSize;
+ size_t free_size = kSize - size;
+ CHECK(ra.AllocateRegionAt(address, size));
+
+ size_t trim_size = kPageSize;
+ do {
+ CHECK_EQ(ra.CheckRegion(address), size);
+ CHECK_EQ(ra.free_size(), free_size);
+
+ trim_size = std::min(size, trim_size);
+ size -= trim_size;
+ free_size += trim_size;
+ CHECK_EQ(ra.TrimRegion(address, size), trim_size);
+ trim_size *= 2;
+ } while (size != 0);
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
new file mode 100644
index 0000000000..96a730370b
--- /dev/null
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -0,0 +1,309 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iterator>
+
+#include "src/v8.h"
+
+#include "src/base/threaded-list.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+struct ThreadedListTestNode {
+ ThreadedListTestNode() : next_(nullptr), other_next_(nullptr) {}
+
+ ThreadedListTestNode** next() { return &next_; }
+
+ ThreadedListTestNode* next_;
+
+ struct OtherTraits {
+ static ThreadedListTestNode** next(ThreadedListTestNode* t) {
+ return t->other_next();
+ }
+ };
+
+ ThreadedListTestNode** other_next() { return &other_next_; }
+
+ ThreadedListTestNode* other_next_;
+};
+
+struct ThreadedListTest : public ::testing::Test {
+ static const size_t INIT_NODES = 5;
+ ThreadedListTest() {}
+
+ void SetUp() override {
+ for (size_t i = 0; i < INIT_NODES; i++) {
+ nodes[i] = ThreadedListTestNode();
+ }
+
+ for (size_t i = 0; i < INIT_NODES; i++) {
+ list.Add(&nodes[i]);
+ normal_next_list.Add(&nodes[i]);
+ }
+
+ // Verify that the setup succeeded.
+ CHECK(list.Verify());
+ CHECK_EQ(list.LengthForTest(), INIT_NODES);
+ CHECK(normal_next_list.Verify());
+ CHECK_EQ(normal_next_list.LengthForTest(), INIT_NODES);
+
+ extra_test_node_0 = ThreadedListTestNode();
+ extra_test_node_1 = ThreadedListTestNode();
+ extra_test_node_2 = ThreadedListTestNode();
+
+ extra_test_list.Add(&extra_test_node_0);
+ extra_test_list.Add(&extra_test_node_1);
+ extra_test_list.Add(&extra_test_node_2);
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ CHECK(extra_test_list.Verify());
+
+ normal_extra_test_list.Add(&extra_test_node_0);
+ normal_extra_test_list.Add(&extra_test_node_1);
+ normal_extra_test_list.Add(&extra_test_node_2);
+ CHECK_EQ(normal_extra_test_list.LengthForTest(), 3);
+ CHECK(normal_extra_test_list.Verify());
+ }
+
+ void TearDown() override {
+ // Check that the normal list threaded through next is still untouched.
+ CHECK(normal_next_list.Verify());
+ CHECK_EQ(normal_next_list.LengthForTest(), INIT_NODES);
+ CHECK_EQ(normal_next_list.AtForTest(0), &nodes[0]);
+ CHECK_EQ(normal_next_list.AtForTest(4), &nodes[4]);
+ CHECK(normal_extra_test_list.Verify());
+ CHECK_EQ(normal_extra_test_list.LengthForTest(), 3);
+ CHECK_EQ(normal_extra_test_list.AtForTest(0), &extra_test_node_0);
+ CHECK_EQ(normal_extra_test_list.AtForTest(2), &extra_test_node_2);
+
+ list.Clear();
+ extra_test_list.Clear();
+ }
+
+ ThreadedListTestNode nodes[INIT_NODES];
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> list;
+ ThreadedList<ThreadedListTestNode> normal_next_list;
+
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ extra_test_list;
+ ThreadedList<ThreadedListTestNode> normal_extra_test_list;
+ ThreadedListTestNode extra_test_node_0;
+ ThreadedListTestNode extra_test_node_1;
+ ThreadedListTestNode extra_test_node_2;
+};
+
+TEST_F(ThreadedListTest, Add) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ ThreadedListTestNode new_node;
+ // Add to existing list
+ list.Add(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.AtForTest(5), &new_node);
+
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ new_node = ThreadedListTestNode();
+ // Add to empty list
+ list.Add(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 1);
+ CHECK_EQ(list.AtForTest(0), &new_node);
+}
+
+TEST_F(ThreadedListTest, AddFront) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ ThreadedListTestNode new_node;
+ // AddFront to existing list
+ list.AddFront(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.first(), &new_node);
+
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ new_node = ThreadedListTestNode();
+ // AddFront to empty list
+ list.AddFront(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 1);
+ CHECK_EQ(list.first(), &new_node);
+}
+
+TEST_F(ThreadedListTest, ReinitializeHead) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ CHECK_NE(extra_test_list.first(), list.first());
+ list.ReinitializeHead(&extra_test_node_0);
+ list.Verify();
+ CHECK_EQ(extra_test_list.first(), list.first());
+ CHECK_EQ(extra_test_list.end(), list.end());
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+}
+
+TEST_F(ThreadedListTest, DropHead) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ CHECK_EQ(extra_test_list.first(), &extra_test_node_0);
+ extra_test_list.DropHead();
+ extra_test_list.Verify();
+ CHECK_EQ(extra_test_list.first(), &extra_test_node_1);
+ CHECK_EQ(extra_test_list.LengthForTest(), 2);
+}
+
+TEST_F(ThreadedListTest, Append) {
+ auto initial_extra_list_end = extra_test_list.end();
+ CHECK_EQ(list.LengthForTest(), 5);
+ list.Append(std::move(extra_test_list));
+ list.Verify();
+ extra_test_list.Verify();
+ CHECK(extra_test_list.is_empty());
+ CHECK_EQ(list.LengthForTest(), 8);
+ CHECK_EQ(list.AtForTest(4), &nodes[4]);
+ CHECK_EQ(list.AtForTest(5), &extra_test_node_0);
+ CHECK_EQ(list.end(), initial_extra_list_end);
+}
+
+TEST_F(ThreadedListTest, Prepend) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ list.Prepend(std::move(extra_test_list));
+ list.Verify();
+ extra_test_list.Verify();
+ CHECK(extra_test_list.is_empty());
+ CHECK_EQ(list.LengthForTest(), 8);
+ CHECK_EQ(list.first(), &extra_test_node_0);
+ CHECK_EQ(list.AtForTest(2), &extra_test_node_2);
+ CHECK_EQ(list.AtForTest(3), &nodes[0]);
+}
+
+TEST_F(ThreadedListTest, Clear) {
+ CHECK_NE(list.LengthForTest(), 0);
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+ CHECK_NULL(list.first());
+}
+
+TEST_F(ThreadedListTest, MoveAssign) {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list;
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ m_list = std::move(extra_test_list);
+
+ m_list.Verify();
+ CHECK_EQ(m_list.first(), &extra_test_node_0);
+ CHECK_EQ(m_list.LengthForTest(), 3);
+
+ // move assign from empty list
+ extra_test_list.Clear();
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+ m_list = std::move(extra_test_list);
+ CHECK_EQ(m_list.LengthForTest(), 0);
+
+ m_list.Verify();
+ CHECK_NULL(m_list.first());
+}
+
+TEST_F(ThreadedListTest, MoveCtor) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list(
+ std::move(extra_test_list));
+
+ m_list.Verify();
+ CHECK_EQ(m_list.LengthForTest(), 3);
+ CHECK_EQ(m_list.first(), &extra_test_node_0);
+
+ // move construct from empty list
+ extra_test_list.Clear();
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list2(
+ std::move(extra_test_list));
+ CHECK_EQ(m_list2.LengthForTest(), 0);
+
+ m_list2.Verify();
+ CHECK_NULL(m_list2.first());
+}
+
+TEST_F(ThreadedListTest, Remove) {
+ CHECK_EQ(list.LengthForTest(), 5);
+
+ // Remove first
+ CHECK_EQ(list.first(), &nodes[0]);
+ list.Remove(&nodes[0]);
+ list.Verify();
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.LengthForTest(), 4);
+
+ // Remove middle
+ list.Remove(&nodes[2]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 3);
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.AtForTest(1), &nodes[3]);
+
+ // Remove last
+ list.Remove(&nodes[4]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 2);
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.AtForTest(1), &nodes[3]);
+
+ // Remove rest
+ list.Remove(&nodes[1]);
+ list.Remove(&nodes[3]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ // Remove not found
+ list.Remove(&nodes[4]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 0);
+}
+
+TEST_F(ThreadedListTest, Rewind) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ for (auto iter = extra_test_list.begin(); iter != extra_test_list.end();
+ ++iter) {
+ if (*iter == &extra_test_node_2) {
+ extra_test_list.Rewind(iter);
+ break;
+ }
+ }
+ CHECK_EQ(extra_test_list.LengthForTest(), 2);
+ auto iter = extra_test_list.begin();
+ CHECK_EQ(*iter, &extra_test_node_0);
+ std::advance(iter, 1);
+ CHECK_EQ(*iter, &extra_test_node_1);
+
+ extra_test_list.Rewind(extra_test_list.begin());
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+}
+
+TEST_F(ThreadedListTest, IterComp) {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> c_list =
+ std::move(extra_test_list);
+ bool found_first;
+ for (auto iter = c_list.begin(); iter != c_list.end(); ++iter) {
+ // This triggers the operator== on the iterator
+ if (iter == c_list.begin()) {
+ found_first = true;
+ }
+ }
+ CHECK(found_first);
+}
+
+TEST_F(ThreadedListTest, ConstIterComp) {
+ const ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ c_list = std::move(extra_test_list);
+ bool found_first;
+ for (auto iter = c_list.begin(); iter != c_list.end(); ++iter) {
+ // This triggers the operator== on the iterator
+ if (iter == c_list.begin()) {
+ found_first = true;
+ }
+ }
+ CHECK(found_first);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
index d0462877f5..97ac4d4b7d 100644
--- a/deps/v8/test/unittests/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
@@ -71,7 +71,7 @@ class ThreadedRunner final : public base::Thread {
explicit ThreadedRunner(TestTask* task)
: Thread(Options("runner thread")), task_(task) {}
- virtual void Run() {
+ void Run() override {
task_->Run();
delete task_;
}
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.h b/deps/v8/test/unittests/code-stub-assembler-unittest.h
index 2c32e0f9b7..c48eb772c0 100644
--- a/deps/v8/test/unittests/code-stub-assembler-unittest.h
+++ b/deps/v8/test/unittests/code-stub-assembler-unittest.h
@@ -14,8 +14,8 @@ namespace internal {
class CodeStubAssemblerTest : public TestWithIsolateAndZone {
public:
- CodeStubAssemblerTest() {}
- ~CodeStubAssemblerTest() override {}
+ CodeStubAssemblerTest() = default;
+ ~CodeStubAssemblerTest() override = default;
};
class CodeStubAssemblerTestState : public compiler::CodeAssemblerState {
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 45121aedb3..bfc111aed5 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -9,6 +9,8 @@
#include "include/v8-platform.h"
#include "src/api-inl.h"
#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
@@ -25,16 +27,6 @@
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
-// V8 is smart enough to know something was already compiled and return compiled
-// code straight away. We need a unique name for each test function so that V8
-// returns an empty SharedFunctionInfo.
-#define _STR(x) #x
-#define STR(x) _STR(x)
-#define _SCRIPT(fn, a, b, c) a fn b fn c
-#define SCRIPT(a, b, c) _SCRIPT("f" STR(__LINE__), a, b, c)
-#define TEST_SCRIPT() \
- "function f" STR(__LINE__) "(x, y) { return x * y }; f" STR(__LINE__) ";"
-
namespace v8 {
namespace internal {
@@ -77,6 +69,37 @@ class CompilerDispatcherTest : public TestWithNativeContext {
CompilerDispatcherTestFlags::RestoreFlags();
}
+ static base::Optional<CompilerDispatcher::JobId> EnqueueUnoptimizedCompileJob(
+ CompilerDispatcher* dispatcher, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ std::unique_ptr<ParseInfo> outer_parse_info =
+ test::OuterParseInfoForShared(isolate, shared);
+ AstValueFactory* ast_value_factory =
+ outer_parse_info->GetOrCreateAstValueFactory();
+ AstNodeFactory ast_node_factory(ast_value_factory,
+ outer_parse_info->zone());
+
+ const AstRawString* function_name =
+ ast_value_factory->GetOneByteString("f");
+ DeclarationScope* script_scope = new (outer_parse_info->zone())
+ DeclarationScope(outer_parse_info->zone(), ast_value_factory);
+ DeclarationScope* function_scope =
+ new (outer_parse_info->zone()) DeclarationScope(
+ outer_parse_info->zone(), script_scope, FUNCTION_SCOPE);
+ function_scope->set_start_position(shared->StartPosition());
+ function_scope->set_end_position(shared->EndPosition());
+ const FunctionLiteral* function_literal =
+ ast_node_factory.NewFunctionLiteral(
+ function_name, function_scope, nullptr, -1, -1, -1,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
+ shared->FunctionLiteralId(isolate), nullptr);
+
+ return dispatcher->Enqueue(outer_parse_info.get(), function_name,
+ function_literal);
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTest);
};
@@ -238,7 +261,7 @@ class MockPlatform : public v8::Platform {
TaskWrapper(MockPlatform* platform,
std::vector<std::unique_ptr<Task>> tasks, bool signal)
: platform_(platform), tasks_(std::move(tasks)), signal_(signal) {}
- ~TaskWrapper() = default;
+ ~TaskWrapper() override = default;
void Run() override {
for (auto& task : tasks_) {
@@ -313,17 +336,27 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+ ASSERT_TRUE(job_id);
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared)); // SFI not yet registered.
+
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+
dispatcher.AbortAll(BlockingBehavior::kBlock);
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(platform.IdleTaskPending());
+ platform.ClearWorkerTasks();
platform.ClearIdleTask();
}
@@ -331,79 +364,71 @@ TEST_F(CompilerDispatcherTest, FinishNow) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+
ASSERT_TRUE(dispatcher.FinishNow(shared));
// Finishing removes the SFI from the queue.
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_TRUE(platform.IdleTaskPending());
- platform.ClearIdleTask();
-}
-
-TEST_F(CompilerDispatcherTest, FinishAllNow) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- constexpr int num_funcs = 2;
- Handle<JSFunction> f[num_funcs];
- Handle<SharedFunctionInfo> shared[num_funcs];
-
- for (int i = 0; i < num_funcs; ++i) {
- std::stringstream ss;
- ss << 'f' << STR(__LINE__) << '_' << i;
- std::string func_name = ss.str();
- std::string script("function f" + func_name + "(x, y) { return x * y }; f" +
- func_name + ";");
- f[i] = RunJS<JSFunction>(script.c_str());
- shared[i] = Handle<SharedFunctionInfo>(f[i]->shared(), i_isolate());
- ASSERT_FALSE(shared[i]->is_compiled());
- ASSERT_TRUE(dispatcher.Enqueue(shared[i]));
- }
- dispatcher.FinishAllNow();
- for (int i = 0; i < num_funcs; ++i) {
- // Finishing removes the SFI from the queue.
- ASSERT_FALSE(dispatcher.IsEnqueued(shared[i]));
- ASSERT_TRUE(shared[i]->is_compiled());
- }
- platform.ClearIdleTask();
platform.ClearWorkerTasks();
+ platform.ClearIdleTask();
}
TEST_F(CompilerDispatcherTest, IdleTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_TRUE(platform.IdleTaskPending());
// Since time doesn't progress on the MockPlatform, this is enough idle time
// to finish compiling the function.
platform.RunIdleTask(1000.0, 0.0);
+ // Since we haven't yet registered the SFI for the job, it should still be
+ // enqueued and waiting.
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
+ ASSERT_FALSE(shared->is_compiled());
+ ASSERT_FALSE(platform.IdleTaskPending());
+
+ // Register SFI, which should schedule another idle task to complete the
+ // compilation.
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(platform.IdleTaskPending());
+ platform.RunIdleTask(1000.0, 0.0);
+
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+
ASSERT_TRUE(platform.IdleTaskPending());
// The job should be scheduled for the main thread.
@@ -419,9 +444,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
ASSERT_TRUE(platform.IdleTaskPending());
// The job should be still scheduled for the main thread, but ready for
- // parsing.
+ // finalization.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
dispatcher.jobs_.begin()->second->status());
// Now grant a lot of idle time and freeze time.
@@ -430,25 +455,28 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, IdleTaskException) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
- std::string func_name("f" STR(__LINE__));
- std::string script("function " + func_name + "(x) { var a = ");
- for (int i = 0; i < 500; i++) {
+ std::string raw_script("(x) { var a = ");
+ for (int i = 0; i < 1000; i++) {
// Alternate + and - to avoid n-ary operation nodes.
- script += "'x' + 'x' - ";
+ raw_script += "'x' + 'x' - ";
}
- script += " 'x'; }; " + func_name + ";";
- Handle<JSFunction> f = RunJS<JSFunction>(script.c_str());
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ raw_script += " 'x'; };";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), script);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
// Since time doesn't progress on the MockPlatform, this is enough idle time
// to finish compiling the function.
@@ -457,41 +485,33 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(i_isolate()->has_pending_exception());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
dispatcher.jobs_.begin()->second->status());
// Now grant a lot of idle time and freeze time.
@@ -500,34 +520,30 @@ TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.WorkerTasksPending());
}
TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_EQ(dispatcher.jobs_.size(), 1u);
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(platform.WorkerTasksPending());
// This does not block, but races with the FinishNow() call below.
@@ -545,46 +561,54 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
- Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
- const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
- Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared1));
- ASSERT_TRUE(dispatcher.Enqueue(shared2));
- ASSERT_TRUE(platform.IdleTaskPending());
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
+
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
// Since time doesn't progress on the MockPlatform, this is enough idle time
// to finish compiling the function.
platform.RunIdleTask(1000.0, 0.0);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared1));
- ASSERT_FALSE(dispatcher.IsEnqueued(shared2));
- ASSERT_TRUE(shared1->is_compiled());
- ASSERT_TRUE(shared2->is_compiled());
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_2));
+ ASSERT_TRUE(shared_1->is_compiled());
+ ASSERT_TRUE(shared_2->is_compiled());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, FinishNowException) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
- std::string func_name("f" STR(__LINE__));
- std::string script("function " + func_name + "(x) { var a = ");
- for (int i = 0; i < 500; i++) {
+ std::string raw_script("(x) { var a = ");
+ for (int i = 0; i < 1000; i++) {
// Alternate + and - to avoid n-ary operation nodes.
- script += "'x' + 'x' - ";
+ raw_script += "'x' + 'x' - ";
}
- script += " 'x'; }; " + func_name + ";";
- Handle<JSFunction> f = RunJS<JSFunction>(script.c_str());
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ raw_script += " 'x'; };";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), script);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_FALSE(dispatcher.FinishNow(shared));
@@ -594,34 +618,26 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
i_isolate()->clear_pending_exception();
platform.ClearIdleTask();
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
// The background task hasn't yet started, so we can just cancel it.
@@ -642,32 +658,23 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
- Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
- const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
- Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
-
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared1));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared1));
- ASSERT_FALSE(shared1->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_FALSE(shared_1->is_compiled());
+ ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
// Kick off background tasks and freeze them.
@@ -681,7 +688,9 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
ASSERT_TRUE(platform.ForegroundTasksPending());
// We can't schedule new tasks while we're aborting.
- ASSERT_FALSE(dispatcher.Enqueue(shared2));
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+ ASSERT_FALSE(job_id_2);
// Run the first AbortTask. Since the background job is still pending, it
// can't do anything.
@@ -711,10 +720,14 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
ASSERT_FALSE(platform.ForegroundTasksPending());
// Now it's possible to enqueue new functions again.
- ASSERT_TRUE(dispatcher.Enqueue(shared2));
+ job_id_2 = EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+ ASSERT_TRUE(job_id_2);
ASSERT_TRUE(platform.IdleTaskPending());
- ASSERT_FALSE(platform.WorkerTasksPending());
+ ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.ForegroundTasksPending());
+
+ dispatcher.AbortAll(BlockingBehavior::kBlock);
+ platform.ClearWorkerTasks();
platform.ClearIdleTask();
}
@@ -722,28 +735,20 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
// Kick off background tasks and freeze them.
@@ -764,7 +769,12 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
ASSERT_TRUE(dispatcher.abort_);
}
- // While the background thread holds on to a job, it is still enqueud.
+ // Run the idle task, which should have already been canceled and won't do
+ // anything.
+ ASSERT_TRUE(platform.IdleTaskPending());
+ platform.RunIdleTask(5.0, 1.0);
+
+ // While the background thread holds on to a job, it is still enqueued.
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
// Release background task.
@@ -783,7 +793,7 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
}
ASSERT_TRUE(platform.ForegroundTasksPending());
- ASSERT_TRUE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
platform.RunForegroundTasks();
@@ -791,32 +801,34 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
ASSERT_FALSE(dispatcher.abort_);
}
-
- platform.ClearForegroundTasks();
- platform.ClearIdleTask();
}
TEST_F(CompilerDispatcherTest, MemoryPressure) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
// Can't enqueue tasks under memory pressure.
dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
true);
- ASSERT_FALSE(dispatcher.Enqueue(shared));
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ ASSERT_FALSE(job_id);
dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kNone, true);
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ job_id = EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ ASSERT_TRUE(job_id);
// Memory pressure cancels current jobs.
dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
true);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
platform.ClearIdleTask();
+ platform.ClearWorkerTasks();
}
namespace {
@@ -826,7 +838,7 @@ class PressureNotificationTask : public CancelableTask {
PressureNotificationTask(Isolate* isolate, CompilerDispatcher* dispatcher,
base::Semaphore* sem)
: CancelableTask(isolate), dispatcher_(dispatcher), sem_(sem) {}
- ~PressureNotificationTask() override {}
+ ~PressureNotificationTask() override = default;
void RunInternal() override {
dispatcher_->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
@@ -847,11 +859,14 @@ TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
- ASSERT_TRUE(dispatcher.Enqueue(shared));
base::Semaphore sem(0);
V8::GetCurrentPlatform()->CallOnWorkerThread(
base::make_unique<PressureNotificationTask>(i_isolate(), &dispatcher,
@@ -873,44 +888,6 @@ TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
ASSERT_FALSE(platform.ForegroundTasksPending());
platform.ClearIdleTask();
-}
-
-TEST_F(CompilerDispatcherTest, EnqueueJob) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
- std::unique_ptr<CompilerDispatcherJob> job(
- new UnoptimizedCompileJob(i_isolate(), dispatcher.tracer_.get(), shared,
- dispatcher.max_stack_size_));
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- dispatcher.Enqueue(std::move(job));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
-
- ASSERT_TRUE(platform.IdleTaskPending());
- platform.ClearIdleTask();
- ASSERT_FALSE(platform.WorkerTasksPending());
-}
-
-TEST_F(CompilerDispatcherTest, EnqueueAndStep) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
-
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(platform.IdleTaskPending());
- platform.ClearIdleTask();
- ASSERT_TRUE(platform.WorkerTasksPending());
platform.ClearWorkerTasks();
}
@@ -919,14 +896,16 @@ TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
// enqueued functions.
CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
- const char script[] = "function lazy() { return 42; }; lazy;";
+ const char raw_script[] = "function lazy() { return 42; }; lazy;";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(dispatcher->IsEnqueued(shared));
- ASSERT_TRUE(dispatcher->Enqueue(shared));
- ASSERT_TRUE(dispatcher->IsEnqueued(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared);
+ dispatcher->RegisterSharedFunctionInfo(*job_id, *shared);
// Now force the function to run and ensure CompileLazy finished and dequeues
// it from the dispatcher.
@@ -940,66 +919,57 @@ TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
// enqueued functions.
CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
- const char source2[] = "function lazy2() { return 42; }; lazy2;";
- Handle<JSFunction> lazy2 = RunJS<JSFunction>(source2);
- Handle<SharedFunctionInfo> shared2(lazy2->shared(), i_isolate());
- ASSERT_FALSE(shared2->is_compiled());
+ const char raw_source_2[] = "function lazy2() { return 42; }; lazy2;";
+ test::ScriptResource* source_2 =
+ new test::ScriptResource(raw_source_2, strlen(raw_source_2));
+ Handle<JSFunction> lazy2 = RunJS<JSFunction>(source_2);
+ Handle<SharedFunctionInfo> shared_2(lazy2->shared(), i_isolate());
+ ASSERT_FALSE(shared_2->is_compiled());
- const char source1[] = "function lazy1() { return lazy2(); }; lazy1;";
- Handle<JSFunction> lazy1 = RunJS<JSFunction>(source1);
- Handle<SharedFunctionInfo> shared1(lazy1->shared(), i_isolate());
- ASSERT_FALSE(shared1->is_compiled());
+ const char raw_source_1[] = "function lazy1() { return lazy2(); }; lazy1;";
+ test::ScriptResource* source_1 =
+ new test::ScriptResource(raw_source_1, strlen(raw_source_1));
+ Handle<JSFunction> lazy1 = RunJS<JSFunction>(source_1);
+ Handle<SharedFunctionInfo> shared_1(lazy1->shared(), i_isolate());
+ ASSERT_FALSE(shared_1->is_compiled());
- ASSERT_TRUE(dispatcher->Enqueue(shared1));
- ASSERT_TRUE(dispatcher->Enqueue(shared2));
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared_1);
+ dispatcher->RegisterSharedFunctionInfo(*job_id_1, *shared_1);
- RunJS("lazy1();");
- ASSERT_TRUE(shared1->is_compiled());
- ASSERT_TRUE(shared2->is_compiled());
- ASSERT_FALSE(dispatcher->IsEnqueued(shared1));
- ASSERT_FALSE(dispatcher->IsEnqueued(shared2));
-}
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared_2);
+ dispatcher->RegisterSharedFunctionInfo(*job_id_2, *shared_2);
-TEST_F(CompilerDispatcherTest, EnqueueAndStepTwice) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ ASSERT_TRUE(dispatcher->IsEnqueued(shared_1));
+ ASSERT_TRUE(dispatcher->IsEnqueued(shared_2));
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- // EnqueueAndStep of the same function again (shouldn't step the job.
- ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(platform.IdleTaskPending());
- ASSERT_TRUE(platform.WorkerTasksPending());
- platform.ClearIdleTask();
- platform.ClearWorkerTasks();
+ RunJS("lazy1();");
+ ASSERT_TRUE(shared_1->is_compiled());
+ ASSERT_TRUE(shared_2->is_compiled());
+ ASSERT_FALSE(dispatcher->IsEnqueued(shared_1));
+ ASSERT_FALSE(dispatcher->IsEnqueued(shared_2));
}
TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
- Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
- const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
- Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared1));
- ASSERT_TRUE(dispatcher.Enqueue(shared2));
- ASSERT_TRUE(platform.IdleTaskPending());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
+
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
+
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
@@ -1007,21 +977,11 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
(++dispatcher.jobs_.begin())->second->status());
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- (++dispatcher.jobs_.begin())->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared1));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared2));
- ASSERT_FALSE(shared1->is_compiled());
- ASSERT_FALSE(shared2->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
+ ASSERT_FALSE(shared_1->is_compiled());
+ ASSERT_FALSE(shared_2->is_compiled());
+ ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
@@ -1029,26 +989,20 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
dispatcher.jobs_.begin()->second->status());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
(++dispatcher.jobs_.begin())->second->status());
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared1));
- ASSERT_FALSE(dispatcher.IsEnqueued(shared2));
- ASSERT_TRUE(shared1->is_compiled());
- ASSERT_TRUE(shared2->is_compiled());
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_2));
+ ASSERT_TRUE(shared_1->is_compiled());
+ ASSERT_TRUE(shared_2->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
}
-#undef _STR
-#undef STR
-#undef _SCRIPT
-#undef SCRIPT
-#undef TEST_SCRIPT
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
index 5a0e89326b..e3d4ae078b 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
@@ -16,6 +16,7 @@
#include "src/flags.h"
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
+#include "src/parsing/preparsed-scope-data.h"
#include "src/v8.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
@@ -26,9 +27,11 @@ namespace internal {
class UnoptimizedCompileJobTest : public TestWithNativeContext {
public:
- UnoptimizedCompileJobTest() : tracer_(isolate()) {}
- ~UnoptimizedCompileJobTest() override {}
+ UnoptimizedCompileJobTest()
+ : tracer_(isolate()), allocator_(isolate()->allocator()) {}
+ ~UnoptimizedCompileJobTest() override = default;
+ AccountingAllocator* allocator() { return allocator_; }
CompilerDispatcherTracer* tracer() { return &tracer_; }
static void SetUpTestCase() {
@@ -44,15 +47,43 @@ class UnoptimizedCompileJobTest : public TestWithNativeContext {
save_flags_ = nullptr;
}
- static Variable* LookupVariableByName(UnoptimizedCompileJob* job,
- const char* name) {
- const AstRawString* name_raw_string =
- job->parse_info_->ast_value_factory()->GetOneByteString(name);
- return job->parse_info_->literal()->scope()->Lookup(name_raw_string);
+ UnoptimizedCompileJob* NewUnoptimizedCompileJob(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ size_t stack_size = FLAG_stack_size) {
+ std::unique_ptr<ParseInfo> outer_parse_info =
+ test::OuterParseInfoForShared(isolate, shared);
+ AstValueFactory* ast_value_factory =
+ outer_parse_info->GetOrCreateAstValueFactory();
+ AstNodeFactory ast_node_factory(ast_value_factory,
+ outer_parse_info->zone());
+
+ const AstRawString* function_name =
+ ast_value_factory->GetOneByteString("f");
+ DeclarationScope* script_scope = new (outer_parse_info->zone())
+ DeclarationScope(outer_parse_info->zone(), ast_value_factory);
+ DeclarationScope* function_scope =
+ new (outer_parse_info->zone()) DeclarationScope(
+ outer_parse_info->zone(), script_scope, FUNCTION_SCOPE);
+ function_scope->set_start_position(shared->StartPosition());
+ function_scope->set_end_position(shared->EndPosition());
+ const FunctionLiteral* function_literal =
+ ast_node_factory.NewFunctionLiteral(
+ function_name, function_scope, nullptr, -1, -1, -1,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
+ shared->FunctionLiteralId(isolate), nullptr);
+
+ return new UnoptimizedCompileJob(
+ tracer(), allocator(), outer_parse_info.get(), function_name,
+ function_literal,
+ isolate->counters()->worker_thread_runtime_call_stats(),
+ isolate->counters()->compile_function_on_background(), FLAG_stack_size);
}
private:
CompilerDispatcherTracer tracer_;
+ AccountingAllocator* allocator_;
static SaveFlags* save_flags_;
DISALLOW_COPY_AND_ASSIGN(UnoptimizedCompileJobTest);
@@ -63,24 +94,25 @@ SaveFlags* UnoptimizedCompileJobTest::save_flags_ = nullptr;
#define ASSERT_JOB_STATUS(STATUS, JOB) ASSERT_EQ(STATUS, JOB->status())
TEST_F(UnoptimizedCompileJobTest, Construct) {
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), nullptr),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
}
TEST_F(UnoptimizedCompileJobTest, StateTransitions) {
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), nullptr),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kPrepared, job);
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kCompiled, job);
- job->FinalizeOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
job->ResetOnMainThread(isolate());
@@ -89,15 +121,16 @@ TEST_F(UnoptimizedCompileJobTest, StateTransitions) {
TEST_F(UnoptimizedCompileJobTest, SyntaxError) {
test::ScriptResource* script = new test::ScriptResource("^^^", strlen("^^^"));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), script);
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->ReportErrorsOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
+
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_TRUE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
ASSERT_TRUE(isolate()->has_pending_exception());
@@ -109,7 +142,7 @@ TEST_F(UnoptimizedCompileJobTest, SyntaxError) {
}
TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
- const char script[] =
+ const char raw_script[] =
"function g() {\n"
" f = function(a) {\n"
" for (var i = 0; i < 3; i++) { a += 20; }\n"
@@ -118,29 +151,28 @@ TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
" return f;\n"
"}\n"
"g();";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
Handle<JSFunction> f = RunJS<JSFunction>(script);
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), handle(f->shared(), f->GetIsolate()),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- job->FinalizeOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ ASSERT_TRUE(shared->is_compiled());
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
Smi* value = Smi::cast(*RunJS("f(100);"));
ASSERT_TRUE(value == Smi::FromInt(160));
-
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-TEST_F(UnoptimizedCompileJobTest, CompileFailureToAnalyse) {
+TEST_F(UnoptimizedCompileJobTest, CompileFailure) {
std::string raw_script("() { var a = ");
- for (int i = 0; i < 500000; i++) {
+ for (int i = 0; i < 10000; i++) {
// TODO(leszeks): Figure out a more "unit-test-y" way of forcing an analysis
// failure than a binop stack overflow.
@@ -150,42 +182,16 @@ TEST_F(UnoptimizedCompileJobTest, CompileFailureToAnalyse) {
raw_script += " 'x'; }";
test::ScriptResource* script =
new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- 100));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), script);
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared, 100));
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->ReportErrorsOnMainThread(isolate());
- ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
- ASSERT_TRUE(isolate()->has_pending_exception());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
- isolate()->clear_pending_exception();
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
-}
-
-TEST_F(UnoptimizedCompileJobTest, CompileFailureToFinalize) {
- std::string raw_script("() { var a = ");
- for (int i = 0; i < 500; i++) {
- // Alternate + and - to avoid n-ary operation nodes.
- raw_script += "'x' + 'x' - ";
- }
- raw_script += " 'x'; }";
- test::ScriptResource* script =
- new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- 50));
-
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- job->ReportErrorsOnMainThread(isolate());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_TRUE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
ASSERT_TRUE(isolate()->has_pending_exception());
@@ -199,7 +205,7 @@ class CompileTask : public Task {
public:
CompileTask(UnoptimizedCompileJob* job, base::Semaphore* semaphore)
: job_(job), semaphore_(semaphore) {}
- ~CompileTask() override {}
+ ~CompileTask() override = default;
void Run() override {
job_->Compile(true);
@@ -223,19 +229,18 @@ TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
"}";
test::ScriptResource* script =
new test::ScriptResource(raw_script, strlen(raw_script));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- 100));
-
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), script);
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
base::Semaphore semaphore(0);
auto background_task = base::make_unique<CompileTask>(job.get(), &semaphore);
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kPrepared, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(background_task));
semaphore.Wait();
- job->FinalizeOnMainThread(isolate());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
@@ -243,26 +248,64 @@ TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
- const char script[] =
- "f = function() {\n"
- " e = (function() { return 42; });\n"
- " return e;\n"
- "};\n"
- "f;";
+TEST_F(UnoptimizedCompileJobTest, EagerInnerFunctions) {
+ const char raw_script[] =
+ "function g() {\n"
+ " f = function() {\n"
+ " // Simulate an eager IIFE with brackets.\n "
+ " var e = (function () { return 42; });\n"
+ " return e;\n"
+ " }\n"
+ " return f;\n"
+ "}\n"
+ "g();";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
Handle<JSFunction> f = RunJS<JSFunction>(script);
+ Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), handle(f->shared(), f->GetIsolate()),
- FLAG_stack_size));
-
- job->PrepareOnMainThread(isolate());
+ job->Compile(false);
+ ASSERT_FALSE(job->IsFailed());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ ASSERT_TRUE(shared->is_compiled());
+
+ Handle<JSFunction> e = RunJS<JSFunction>("f();");
+
+ ASSERT_TRUE(e->shared()->is_compiled());
+
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+}
+
+TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
+ const char raw_script[] =
+ "function g() {\n"
+ " f = function() {\n"
+ " function e() { return 42; };\n"
+ " return e;\n"
+ " }\n"
+ " return f;\n"
+ "}\n"
+ "g();";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
+ Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
+
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->FinalizeOnMainThread(isolate());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ ASSERT_TRUE(shared->is_compiled());
Handle<JSFunction> e = RunJS<JSFunction>("f();");
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 48c15934df..53b9c6a241 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -22,8 +22,8 @@ using ToBooleanMode = interpreter::BytecodeArrayBuilder::ToBooleanMode;
class BytecodeAnalysisTest : public TestWithIsolateAndZone {
public:
- BytecodeAnalysisTest() {}
- ~BytecodeAnalysisTest() override {}
+ BytecodeAnalysisTest() = default;
+ ~BytecodeAnalysisTest() override = default;
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
diff --git a/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
index a201fc9a55..22ed2abf9b 100644
--- a/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
@@ -18,7 +18,7 @@ namespace compiler {
class CheckpointEliminationTest : public GraphTest {
public:
CheckpointEliminationTest() : GraphTest() {}
- ~CheckpointEliminationTest() override {}
+ ~CheckpointEliminationTest() override = default;
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.h b/deps/v8/test/unittests/compiler/code-assembler-unittest.h
index 21f3df5f4b..56f1959765 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.h
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.h
@@ -15,8 +15,8 @@ namespace compiler {
class CodeAssemblerTest : public TestWithIsolateAndZone {
public:
- CodeAssemblerTest() {}
- ~CodeAssemblerTest() override {}
+ CodeAssemblerTest() = default;
+ ~CodeAssemblerTest() override = default;
};
class CodeAssemblerTestState : public CodeAssemblerState {
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index cb5b5fd806..f40cab2758 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -23,7 +23,7 @@ class CommonOperatorReducerTest : public GraphTest {
public:
explicit CommonOperatorReducerTest(int num_parameters = 1)
: GraphTest(num_parameters), machine_(zone()), simplified_(zone()) {}
- ~CommonOperatorReducerTest() override {}
+ ~CommonOperatorReducerTest() override = default;
protected:
Reduction Reduce(
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 2ee0dbb382..4d66ded5f1 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -120,7 +120,7 @@ namespace {
class CommonOperatorTest : public TestWithZone {
public:
CommonOperatorTest() : common_(zone()) {}
- ~CommonOperatorTest() override {}
+ ~CommonOperatorTest() override = default;
CommonOperatorBuilder* common() { return &common_; }
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 464ee3a971..fd0845159f 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -66,7 +66,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
js_heap_broker_(isolate(), zone()),
simplified_(zone()),
deps_(isolate(), zone()) {}
- ~ConstantFoldingReducerTest() override {}
+ ~ConstantFoldingReducerTest() override = default;
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index 607df1fafb..992ddcc55b 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -21,7 +21,7 @@ class ControlFlowOptimizerTest : public GraphTest {
public:
explicit ControlFlowOptimizerTest(int num_parameters = 3)
: GraphTest(num_parameters), machine_(zone()), javascript_(zone()) {}
- ~ControlFlowOptimizerTest() override {}
+ ~ControlFlowOptimizerTest() override = default;
protected:
void Optimize() {
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index 4444ed0ca5..72e02e1416 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -20,7 +20,7 @@ class DeadCodeEliminationTest : public GraphTest {
public:
explicit DeadCodeEliminationTest(int num_parameters = 4)
: GraphTest(num_parameters) {}
- ~DeadCodeEliminationTest() override {}
+ ~DeadCodeEliminationTest() override = default;
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index dc2f2189d1..f506502610 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -751,7 +751,7 @@ TEST_F(GraphReducerTest, Sorter1) {
Node* n1 = graph()->NewNode(&kOpA0);
Node* n2 = graph()->NewNode(&kOpA1, n1);
Node* n3 = graph()->NewNode(&kOpA1, n1);
- Node* end = NULL; // Initialize to please the compiler.
+ Node* end = nullptr; // Initialize to please the compiler.
if (i == 0) end = graph()->NewNode(&kOpA2, n2, n3);
if (i == 1) end = graph()->NewNode(&kOpA2, n3, n2);
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index af2c382f5b..4736ddefa2 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/compiler/graph-unittest.h"
+#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/node-properties.h"
#include "src/heap/factory.h"
#include "src/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
@@ -24,16 +25,22 @@ GraphTest::GraphTest(int num_parameters)
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
+ js_heap_broker()->SetNativeContextRef();
}
-GraphTest::~GraphTest() {}
+GraphTest::~GraphTest() = default;
Node* GraphTest::Parameter(int32_t index) {
return graph()->NewNode(common()->Parameter(index), graph()->start());
}
+Node* GraphTest::Parameter(Type type, int32_t index) {
+ Node* node = GraphTest::Parameter(index);
+ NodeProperties::SetType(node, type);
+ return node;
+}
Node* GraphTest::Float32Constant(volatile float value) {
return graph()->NewNode(common()->Float32Constant(value));
@@ -113,15 +120,9 @@ Matcher<Node*> GraphTest::IsUndefinedConstant() {
TypedGraphTest::TypedGraphTest(int num_parameters)
: GraphTest(num_parameters),
- typer_(isolate(), js_heap_broker(), Typer::kNoFlags, graph()) {}
-
-TypedGraphTest::~TypedGraphTest() {}
+ typer_(js_heap_broker(), Typer::kNoFlags, graph()) {}
-Node* TypedGraphTest::Parameter(Type type, int32_t index) {
- Node* node = GraphTest::Parameter(index);
- NodeProperties::SetType(node, type);
- return node;
-}
+TypedGraphTest::~TypedGraphTest() = default;
namespace graph_unittest {
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index d9b9934770..8317ebf279 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -34,6 +34,7 @@ class GraphTest : public virtual TestWithNativeContext,
Node* end() { return graph()->end(); }
Node* Parameter(int32_t index = 0);
+ Node* Parameter(Type type, int32_t index = 0);
Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
Node* Int32Constant(int32_t value);
@@ -79,9 +80,6 @@ class TypedGraphTest : public GraphTest {
~TypedGraphTest() override;
protected:
- Node* Parameter(int32_t index = 0) { return GraphTest::Parameter(index); }
- Node* Parameter(Type type, int32_t index = 0);
-
Typer* typer() { return &typer_; }
private:
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index ed4a1c648a..2d59393f9d 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -20,7 +20,7 @@ namespace compiler {
InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
-InstructionSelectorTest::~InstructionSelectorTest() {}
+InstructionSelectorTest::~InstructionSelectorTest() = default;
InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
@@ -365,7 +365,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
ZoneVector<MachineType> empty_types(zone());
auto call_descriptor = Linkage::GetJSCallDescriptor(
- zone(), false, 1, CallDescriptor::kNeedsFrameState);
+ zone(), false, 1,
+ CallDescriptor::kNeedsFrameState | CallDescriptor::kCanUseRoots);
// Build frame state for the state before the call.
Node* parameters = m.AddNode(
diff --git a/deps/v8/test/unittests/compiler/instruction-unittest.cc b/deps/v8/test/unittests/compiler/instruction-unittest.cc
index 96add7fdd8..72deb12d02 100644
--- a/deps/v8/test/unittests/compiler/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-unittest.cc
@@ -38,8 +38,8 @@ bool Contains(const ZoneVector<MoveOperands*>* moves,
class InstructionTest : public TestWithZone {
public:
- InstructionTest() {}
- virtual ~InstructionTest() {}
+ InstructionTest() = default;
+ ~InstructionTest() override = default;
ParallelMove* CreateParallelMove(
const std::vector<InstructionOperand>& operand_pairs) {
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 53e3b48762..7660f5851e 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -21,11 +21,12 @@ namespace compiler {
class JSCallReducerTest : public TypedGraphTest {
public:
JSCallReducerTest()
- : TypedGraphTest(3),
- javascript_(zone()),
- deps_(isolate(), zone()),
- js_heap_broker(isolate(), zone()) {}
- ~JSCallReducerTest() override {}
+ : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {
+ if (FLAG_concurrent_compiler_frontend) {
+ js_heap_broker()->SerializeStandardObjects();
+ }
+ }
+ ~JSCallReducerTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -36,7 +37,7 @@ class JSCallReducerTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSCallReducer reducer(&graph_reducer, &jsgraph, &js_heap_broker,
+ JSCallReducer reducer(&graph_reducer, &jsgraph, js_heap_broker(),
JSCallReducer::kNoFlags, native_context(), &deps_);
return reducer.Reduce(node);
}
@@ -45,16 +46,13 @@ class JSCallReducerTest : public TypedGraphTest {
static void SetUpTestCase() {
old_flag_lazy_ = i::FLAG_lazy_deserialization;
- old_flag_lazy_handler_ = i::FLAG_lazy_handler_deserialization;
i::FLAG_lazy_deserialization = false;
- i::FLAG_lazy_handler_deserialization = false;
TypedGraphTest::SetUpTestCase();
}
static void TearDownTestCase() {
TypedGraphTest::TearDownTestCase();
i::FLAG_lazy_deserialization = old_flag_lazy_;
- i::FLAG_lazy_handler_deserialization = old_flag_lazy_handler_;
}
Node* GlobalFunction(const char* name) {
@@ -124,7 +122,7 @@ class JSCallReducerTest : public TypedGraphTest {
// overwriting existing metadata.
shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
Handle<FeedbackVector> vector = FeedbackVector::New(isolate(), shared);
- VectorSlotPair feedback(vector, FeedbackSlot(0));
+ VectorSlotPair feedback(vector, FeedbackSlot(0), UNINITIALIZED);
return javascript()->Call(arity, CallFrequency(), feedback,
ConvertReceiverMode::kAny,
SpeculationMode::kAllowSpeculation);
@@ -133,7 +131,6 @@ class JSCallReducerTest : public TypedGraphTest {
private:
JSOperatorBuilder javascript_;
CompilationDependencies deps_;
- JSHeapBroker js_heap_broker;
static bool old_flag_lazy_;
static bool old_flag_lazy_handler_;
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 2db241aaa9..eafd7fa35e 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -33,8 +33,9 @@ class JSCreateLoweringTest : public TypedGraphTest {
: TypedGraphTest(3),
javascript_(zone()),
deps_(isolate(), zone()),
- handle_scope_(isolate()) {}
- ~JSCreateLoweringTest() override {}
+ handle_scope_(isolate()) {
+ }
+ ~JSCreateLoweringTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -45,7 +46,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, js_heap_broker(),
- native_context(), zone());
+ zone());
return reducer.Reduce(node);
}
@@ -172,7 +173,7 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
- Handle<ScopeInfo> scope_info(factory()->NewScopeInfo(1));
+ Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
Node* const object = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
@@ -192,7 +193,7 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
// JSCreateCatchContext
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
- Handle<ScopeInfo> scope_info(factory()->NewScopeInfo(1));
+ Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
Node* const exception = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 2b0ccaed24..234fe940eb 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -26,7 +26,7 @@ namespace compiler {
class JSIntrinsicLoweringTest : public GraphTest {
public:
JSIntrinsicLoweringTest() : GraphTest(3), javascript_(zone()) {}
- ~JSIntrinsicLoweringTest() override {}
+ ~JSIntrinsicLoweringTest() override = default;
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
new file mode 100644
index 0000000000..fdc87904c4
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/graph-unittest.h"
+
+#include "src/compiler/js-native-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/dtoa.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace js_native_context_specialization_unittest {
+
+class JSNativeContextSpecializationTest : public GraphTest {
+ public:
+ explicit JSNativeContextSpecializationTest(int num_parameters = 1)
+ : GraphTest(num_parameters), javascript_(zone()) {}
+  ~JSNativeContextSpecializationTest() override = default;
+
+ protected:
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+TEST_F(JSNativeContextSpecializationTest, GetMaxStringLengthOfString) {
+ const size_t str_len = 3;
+ const size_t num_len = kBase10MaximalLength + 1;
+
+ Node* const str_node = graph()->NewNode(
+ common()->HeapConstant(factory()->InternalizeUtf8String("str")));
+ EXPECT_EQ(JSNativeContextSpecialization::GetMaxStringLength(js_heap_broker(),
+ str_node),
+ str_len);
+
+ Node* const num_node = graph()->NewNode(common()->NumberConstant(10.0 / 3));
+ EXPECT_EQ(JSNativeContextSpecialization::GetMaxStringLength(js_heap_broker(),
+ num_node),
+ num_len);
+}
+
+} // namespace js_native_context_specialization_unittest
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 9ce837cd8c..43998824d2 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -38,7 +38,7 @@ Type const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
class JSTypedLoweringTest : public TypedGraphTest {
public:
JSTypedLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
- ~JSTypedLoweringTest() override {}
+ ~JSTypedLoweringTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -401,12 +401,7 @@ TEST_F(JSTypedLoweringTest, JSAddWithString) {
Reduction r = Reduce(graph()->NewNode(javascript()->Add(hint), lhs, rhs,
context, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::StringAdd(
- isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED)
- .code()),
- lhs, rhs, context, frame_state, effect, control));
+ EXPECT_THAT(r.replacement(), IsStringConcat(_, lhs, rhs));
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 5c49468991..042e7e6bbc 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -26,7 +26,7 @@ class LoadEliminationTest : public TypedGraphTest {
simplified_(zone()),
jsgraph_(isolate(), graph(), common(), nullptr, simplified(), nullptr) {
}
- ~LoadEliminationTest() override {}
+ ~LoadEliminationTest() override = default;
protected:
JSGraph* jsgraph() { return &jsgraph_; }
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index d39336dfa5..07013aa52c 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -54,7 +54,7 @@ struct Counter {
class LoopPeelingTest : public GraphTest {
public:
LoopPeelingTest() : GraphTest(1), machine_(zone()) {}
- ~LoopPeelingTest() override {}
+ ~LoopPeelingTest() override = default;
protected:
MachineOperatorBuilder machine_;
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index f47e780426..b8b0c9004f 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -71,7 +71,7 @@ class MachineOperatorReducerTestWithParam
public:
explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
: MachineOperatorReducerTest(num_parameters) {}
- ~MachineOperatorReducerTestWithParam() override {}
+ ~MachineOperatorReducerTestWithParam() override = default;
};
@@ -344,6 +344,27 @@ TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
}
}
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToInt64
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeFloat64ToInt64WithChangeInt64ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt64(),
+ graph()->NewNode(machine()->ChangeInt64ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt64(), Float64Constant(FastI2D(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
+ }
+}
// -----------------------------------------------------------------------------
// ChangeFloat64ToUint32
@@ -397,6 +418,27 @@ TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
}
}
+// -----------------------------------------------------------------------------
+// ChangeInt64ToFloat64
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeInt64ToFloat64WithChangeFloat64ToInt64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeInt64ToFloat64(),
+ graph()->NewNode(machine()->ChangeFloat64ToInt64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(MachineOperatorReducerTest, ChangeInt64ToFloat64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->ChangeInt64ToFloat64(), Int64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(BitEq(FastI2D(x))));
+ }
+}
// -----------------------------------------------------------------------------
// ChangeUint32ToFloat64
@@ -2020,6 +2062,16 @@ TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
// -----------------------------------------------------------------------------
// Float64Equal
+TEST_F(MachineOperatorReducerTest, Float64EqualWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Equal(), Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(x == y));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Conversions) {
Node* const p0 = Parameter(0);
@@ -2049,6 +2101,17 @@ TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Constant) {
// -----------------------------------------------------------------------------
// Float64LessThan
+TEST_F(MachineOperatorReducerTest, Float64LessThanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64LessThan(),
+ Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(x < y));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Conversions) {
Node* const p0 = Parameter(0);
@@ -2089,6 +2152,17 @@ TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Constant) {
// -----------------------------------------------------------------------------
// Float64LessThanOrEqual
+TEST_F(MachineOperatorReducerTest, Float64LessThanOrEqualWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(x <= y));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest,
Float64LessThanOrEqualWithFloat32Conversions) {
diff --git a/deps/v8/test/unittests/compiler/node-cache-unittest.cc b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
index f77377deda..b699fb38ca 100644
--- a/deps/v8/test/unittests/compiler/node-cache-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
@@ -63,7 +63,7 @@ TEST_F(NodeCacheTest, Int32Constant_hits) {
for (int i = 0; i < kSize; i++) {
int32_t v = i * -55;
Node** pos = cache.Find(zone(), v);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
EXPECT_EQ(nodes[i], *pos);
hits++;
}
@@ -101,7 +101,7 @@ TEST_F(NodeCacheTest, Int64Constant_hits) {
for (int i = 0; i < kSize; i++) {
int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
Node** pos = cache.Find(zone(), v);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
EXPECT_EQ(nodes[i], *pos);
hits++;
}
@@ -118,7 +118,7 @@ TEST_F(NodeCacheTest, GetCachedNodes_int32) {
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
Node** pos = cache.Find(zone(), k);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
ZoneVector<Node*> nodes(zone());
cache.GetCachedNodes(&nodes);
EXPECT_THAT(nodes, Contains(*pos));
@@ -141,7 +141,7 @@ TEST_F(NodeCacheTest, GetCachedNodes_int64) {
for (size_t i = 0; i < arraysize(constants); i++) {
int64_t k = constants[i];
Node** pos = cache.Find(zone(), k);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
ZoneVector<Node*> nodes(zone());
cache.GetCachedNodes(&nodes);
EXPECT_THAT(nodes, Contains(*pos));
diff --git a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
index 7f043049f0..2663e3abb8 100644
--- a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
@@ -19,7 +19,7 @@ namespace compiler {
class NodeMatcherTest : public GraphTest {
public:
NodeMatcherTest() : machine_(zone()) {}
- ~NodeMatcherTest() override {}
+ ~NodeMatcherTest() override = default;
MachineOperatorBuilder* machine() { return &machine_; }
@@ -150,191 +150,191 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
// (B0 + B1) -> [B0, 0, B1, NULL]
BaseWithIndexAndDisplacement32Matcher match1(graph()->NewNode(a_op, b0, b1));
- CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, nullptr);
// (B0 + D15) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement32Matcher match2(graph()->NewNode(a_op, b0, d15));
- CheckBaseWithIndexAndDisplacement(&match2, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match2, nullptr, 0, b0, d15);
// (D15 + B0) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement32Matcher match3(graph()->NewNode(a_op, d15, b0));
- CheckBaseWithIndexAndDisplacement(&match3, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match3, nullptr, 0, b0, d15);
// (B0 + M1) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match4(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match5(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match6(graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match6, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match6, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match7(graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match7, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match7, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match8(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement32Matcher match9(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement32Matcher match10(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match10, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match10, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement32Matcher match11(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match11, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match11, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match12(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match13(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match14(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match14, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match14, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match15(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match15, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match15, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match16(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match17(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match18(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match18, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match18, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match19(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match19, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match19, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match20(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement32Matcher match21(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement32Matcher match22(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match22, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match22, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement32Matcher match23(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match23, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match23, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match24(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match25(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match26(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match26, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match26, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match27(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match27, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match27, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match28(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 2, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement32Matcher match29(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement32Matcher match30(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match30, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match30, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement32Matcher match31(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match31, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match31, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match32(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 2, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement32Matcher match33(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement32Matcher match34(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match34, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match34, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement32Matcher match35(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match35, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match35, p1, 3, nullptr, d15);
// 2 INPUT - NEGATIVE CASES
// (M3 + B1) -> [B0, 0, M3, NULL]
BaseWithIndexAndDisplacement32Matcher match36(graph()->NewNode(a_op, b1, m3));
- CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, nullptr);
// (S4 + B1) -> [B0, 0, S4, NULL]
BaseWithIndexAndDisplacement32Matcher match37(graph()->NewNode(a_op, b1, s4));
- CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, nullptr);
// 3 INPUT
@@ -400,209 +400,209 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match47(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match47, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match47, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match48(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match48, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match48, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match49(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match49, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match49, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match50(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match50, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match50, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match51(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match51, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match51, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match52(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match52, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match52, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match53(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match53, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match53, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match54(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match54, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match54, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match55(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match55, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match55, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match56(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match56, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match56, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match57(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match57, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match57, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match58(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match58, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match58, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match59(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match59, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match59, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match60(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match60, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match60, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match61(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match61, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match61, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match62(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match62, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match62, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match63(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match63, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match63, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match64(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match64, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match64, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match65(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match65, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match65, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match66(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match66, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match66, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match67(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match67, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match67, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match68(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match68, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match68, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match69(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match69, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match69, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match70(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match70, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match70, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 2, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match71(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match71, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match71, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 2, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match72(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match72, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match72, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match73(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match73, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match73, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match74(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match74, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match74, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 2, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match75(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match75, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match75, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 2, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match76(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match76, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match76, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match77(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match77, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match77, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match78(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match78, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match78, p1, 3, nullptr, d15);
// (D15 + S3) + B0 -> [p1, 2, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -627,7 +627,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match81(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match81, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match81, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (s3 + b0), d15]
// Avoid changing simple addressing to complex addressing
@@ -636,7 +636,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match82(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match82, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match82, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [p1, 2, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -686,7 +686,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match88(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match88, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match88, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (b0 + b1), d15]
// Avoid changing simple addressing to complex addressing
@@ -694,7 +694,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match89(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match89, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match89, nullptr, 0, temp, d15);
// 5 INPUT - with none-addressing operand uses
@@ -702,219 +702,219 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match90(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match90, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match90, b0, 0, m1, nullptr);
// (M1 + B0) -> [b0, 0, m1, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match91(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match91, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match91, b0, 0, m1, nullptr);
// (D15 + M1) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match92(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match92, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match92, nullptr, 0, m1, d15);
// (M1 + D15) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match93(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match93, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match93, nullptr, 0, m1, d15);
// (B0 + S0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match94(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match94, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match94, b0, 0, s0, nullptr);
// (S0 + B0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match95(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match95, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match95, b0, 0, s0, nullptr);
// (D15 + S0) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match96(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match96, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match96, nullptr, 0, s0, d15);
// (S0 + D15) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match97(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match97, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match97, nullptr, 0, s0, d15);
// (B0 + M2) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match98(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m2, nullptr);
// (M2 + B0) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match99(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match99, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match99, b0, 0, m2, nullptr);
// (D15 + M2) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match100(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match100, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match100, nullptr, 0, m2, d15);
// (M2 + D15) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match101(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match101, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match101, nullptr, 0, m2, d15);
// (B0 + S1) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match102(
graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s1, nullptr);
// (S1 + B0) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match103(
graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match103, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match103, b0, 0, s1, nullptr);
// (D15 + S1) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match104(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match104, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match104, nullptr, 0, s1, d15);
// (S1 + D15) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match105(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match105, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match105, nullptr, 0, s1, d15);
// (B0 + M4) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match106(
graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m4, nullptr);
// (M4 + B0) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match107(
graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match107, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match107, b0, 0, m4, nullptr);
// (D15 + M4) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match108(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match108, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match108, nullptr, 0, m4, d15);
// (M4 + D15) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match109(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match109, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match109, nullptr, 0, m4, d15);
// (B0 + S2) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match110(
graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s2, nullptr);
// (S2 + B0) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match111(
graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match111, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match111, b0, 0, s2, nullptr);
// (D15 + S2) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match112(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match112, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match112, nullptr, 0, s2, d15);
// (S2 + D15) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match113(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match113, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match113, nullptr, 0, s2, d15);
// (B0 + M8) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match114(
graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m8, nullptr);
// (M8 + B0) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match115(
graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match115, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match115, b0, 0, m8, nullptr);
// (D15 + M8) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match116(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match116, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match116, nullptr, 0, m8, d15);
// (M8 + D15) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match117(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match117, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match117, nullptr, 0, m8, d15);
// (B0 + S3) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match118(
graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s3, nullptr);
// (S3 + B0) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match119(
graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match119, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match119, b0, 0, s3, nullptr);
// (D15 + S3) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match120(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match120, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match120, nullptr, 0, s3, d15);
// (S3 + D15) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match121(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match121, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match121, nullptr, 0, s3, d15);
// (D15 + S3) + B0 -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -922,7 +922,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match122(
graph()->NewNode(a_op, temp, b0));
- CheckBaseWithIndexAndDisplacement(&match122, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match122, b0, 0, temp, nullptr);
// (B0 + D15) + S3 -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -930,7 +930,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match123(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match123, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match123, p1, 3, temp, nullptr);
// (S3 + B0) + D15 -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -938,7 +938,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match124(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match124, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match124, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -946,7 +946,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match125(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match125, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match125, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -954,7 +954,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match126(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match126, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match126, b0, 0, temp, nullptr);
// S3 + (B0 + D15) -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -962,7 +962,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match127(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match127, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match127, p1, 3, temp, nullptr);
// S3 + (B0 - D15) -> [p1, 3, (B0 - D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -970,14 +970,14 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match128(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match128, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match128, p1, 3, temp, nullptr);
// B0 + (B1 - D15) -> [b0, 0, (B1 - D15), NULL]
temp = graph()->NewNode(sub_op, b1, d15);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match129(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, nullptr);
// (B0 - D15) + S3 -> [p1, 3, temp, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -985,21 +985,21 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match130(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, nullptr);
// (B0 + B1) + D15 -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match131(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match131, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match131, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match132(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match132, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match132, nullptr, 0, temp, d15);
}
@@ -1101,195 +1101,195 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
// (B0 + B1) -> [B0, 0, B1, NULL]
BaseWithIndexAndDisplacement64Matcher match1(graph()->NewNode(a_op, b0, b1));
- CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, nullptr);
// (B0 + D15) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement64Matcher match2(graph()->NewNode(a_op, b0, d15));
- CheckBaseWithIndexAndDisplacement(&match2, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match2, nullptr, 0, b0, d15);
BaseWithIndexAndDisplacement64Matcher match2_32(
graph()->NewNode(a_op, b0, d15_32));
- CheckBaseWithIndexAndDisplacement(&match2_32, NULL, 0, b0, d15_32);
+ CheckBaseWithIndexAndDisplacement(&match2_32, nullptr, 0, b0, d15_32);
// (D15 + B0) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement64Matcher match3(graph()->NewNode(a_op, d15, b0));
- CheckBaseWithIndexAndDisplacement(&match3, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match3, nullptr, 0, b0, d15);
// (B0 + M1) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match4(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match5(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match6(graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match6, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match6, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match7(graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match7, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match7, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match8(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement64Matcher match9(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement64Matcher match10(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match10, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match10, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement64Matcher match11(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match11, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match11, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match12(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match13(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match14(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match14, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match14, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match15(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match15, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match15, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match16(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match17(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match18(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match18, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match18, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match19(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match19, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match19, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match20(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement64Matcher match21(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement64Matcher match22(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match22, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match22, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement64Matcher match23(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match23, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match23, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match24(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match25(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match26(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match26, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match26, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match27(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match27, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match27, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match28(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 2, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement64Matcher match29(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement64Matcher match30(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match30, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match30, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement64Matcher match31(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match31, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match31, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match32(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 2, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement64Matcher match33(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement64Matcher match34(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match34, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match34, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement64Matcher match35(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match35, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match35, p1, 3, nullptr, d15);
// 2 INPUT - NEGATIVE CASES
// (M3 + B1) -> [B0, 0, M3, NULL]
BaseWithIndexAndDisplacement64Matcher match36(graph()->NewNode(a_op, b1, m3));
- CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, nullptr);
// (S4 + B1) -> [B0, 0, S4, NULL]
BaseWithIndexAndDisplacement64Matcher match37(graph()->NewNode(a_op, b1, s4));
- CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, nullptr);
// 3 INPUT
@@ -1405,209 +1405,209 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match54(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match54, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match54, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match55(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match55, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match55, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match56(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match56, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match56, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match57(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match57, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match57, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match58(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match58, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match58, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match59(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match59, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match59, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match60(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match60, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match60, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match61(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match61, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match61, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match62(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match62, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match62, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match63(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match63, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match63, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match64(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match64, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match64, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match65(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match65, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match65, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match66(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match66, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match66, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match67(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match67, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match67, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match68(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match68, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match68, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match69(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match69, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match69, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match70(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match70, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match70, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match71(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match71, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match71, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match72(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match72, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match72, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match73(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match73, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match73, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match74(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match74, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match74, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match75(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match75, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match75, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match76(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match76, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match76, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match77(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match77, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match77, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 2, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match78(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match78, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match78, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 2, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match79(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match79, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match79, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match80(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match80, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match80, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 2, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match81(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match81, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match81, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 2, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match82(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match82, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match82, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 2, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match83(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match83, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match83, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match84(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match84, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match84, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 2, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match85(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match85, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match85, p1, 3, nullptr, d15);
// (D15 + S3) + B0 -> [p1, 2, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1632,7 +1632,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match88(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match88, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match88, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (s3 + b0), d15]
// Avoid changing simple addressing to complex addressing
@@ -1641,7 +1641,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match89(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match89, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match89, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [p1, 2, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1691,7 +1691,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match95(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match95, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match95, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (b0 + b1), d15]
// Avoid changing simple addressing to complex addressing
@@ -1699,7 +1699,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match96(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match96, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match96, nullptr, 0, temp, d15);
// 5 INPUT - with none-addressing operand uses
@@ -1707,223 +1707,223 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match97(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match97, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match97, b0, 0, m1, nullptr);
// (M1 + B0) -> [b0, 0, m1, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match98(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m1, nullptr);
// (D15 + M1) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match99(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match99, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match99, nullptr, 0, m1, d15);
// (M1 + D15) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match100(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match100, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match100, nullptr, 0, m1, d15);
// (B0 + S0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match101(
graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match101, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match101, b0, 0, s0, nullptr);
// (S0 + B0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match102(
graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s0, nullptr);
// (D15 + S0) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match103(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match103, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match103, nullptr, 0, s0, d15);
// (S0 + D15) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match104(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match104, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match104, nullptr, 0, s0, d15);
// (B0 + M2) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match105(
graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match105, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match105, b0, 0, m2, nullptr);
// (M2 + B0) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match106(
graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m2, nullptr);
// (D15 + M2) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match107(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match107, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match107, nullptr, 0, m2, d15);
// (M2 + D15) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match108(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match108, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match108, nullptr, 0, m2, d15);
// (B0 + S1) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match109(
graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match109, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match109, b0, 0, s1, nullptr);
// (S1 + B0) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match110(
graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s1, nullptr);
// (D15 + S1) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match111(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match111, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match111, nullptr, 0, s1, d15);
// (S1 + D15) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match112(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match112, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match112, nullptr, 0, s1, d15);
// (B0 + M4) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match113(
graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match113, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match113, b0, 0, m4, nullptr);
// (M4 + B0) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match114(
graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m4, nullptr);
// (D15 + M4) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match115(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match115, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match115, nullptr, 0, m4, d15);
// (M4 + D15) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match116(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match116, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match116, nullptr, 0, m4, d15);
// (B0 + S2) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match117(
graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match117, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match117, b0, 0, s2, nullptr);
// (S2 + B0) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match118(
graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s2, nullptr);
// (D15 + S2) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match119(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match119, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match119, nullptr, 0, s2, d15);
// (S2 + D15) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match120(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match120, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match120, nullptr, 0, s2, d15);
// (B0 + M8) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match121(
graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match121, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match121, b0, 0, m8, nullptr);
// (M8 + B0) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match122(
graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match122, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match122, b0, 0, m8, nullptr);
// (D15 + M8) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match123(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match123, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match123, nullptr, 0, m8, d15);
// (M8 + D15) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match124(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match124, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match124, nullptr, 0, m8, d15);
// (B0 + S3) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match125(
graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match125, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match125, b0, 0, s3, nullptr);
// (S3 + B0) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match126(
graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match126, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match126, b0, 0, s3, nullptr);
// (D15 + S3) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match127(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match127, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match127, nullptr, 0, s3, d15);
// (S3 + D15) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match128(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match128, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match128, nullptr, 0, s3, d15);
// (D15 + S3) + B0 -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1931,7 +1931,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match129(
graph()->NewNode(a_op, temp, b0));
- CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, nullptr);
// (B0 + D15) + S3 -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1939,7 +1939,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match130(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, nullptr);
// (S3 + B0) + D15 -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1947,7 +1947,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match131(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match131, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match131, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1955,7 +1955,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match132(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match132, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match132, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1963,7 +1963,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match133(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match133, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match133, b0, 0, temp, nullptr);
// S3 + (B0 + D15) -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1971,7 +1971,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match134(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match134, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match134, p1, 3, temp, nullptr);
// S3 + (B0 - D15) -> [p1, 3, (B0 - D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1979,14 +1979,14 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match135(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match135, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match135, p1, 3, temp, nullptr);
// B0 + (B1 - D15) -> [b0, 0, (B1 - D15), NULL]
temp = graph()->NewNode(sub_op, b1, d15);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match136(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match136, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match136, b0, 0, temp, nullptr);
// (B0 - D15) + S3 -> [p1, 3, temp, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1994,21 +1994,21 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match137(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match137, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match137, p1, 3, temp, nullptr);
// (B0 + B1) + D15 -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match138(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match138, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match138, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match139(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match139, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match139, nullptr, 0, temp, d15);
}
TEST_F(NodeMatcherTest, BranchMatcher_match) {
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 56f18931b4..0b3d8786f8 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -57,7 +57,7 @@ class TestNodeMatcher : public MatcherInterface<Node*> {
bool MatchAndExplain(Node* node,
MatchResultListener* listener) const override {
- if (node == NULL) {
+ if (node == nullptr) {
*listener << "which is NULL";
return false;
}
@@ -1401,6 +1401,43 @@ class IsBinopMatcher final : public TestNodeMatcher {
const Matcher<Node*> rhs_matcher_;
};
+class IsStringConcatMatcher final : public TestNodeMatcher {
+ public:
+ IsStringConcatMatcher(const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher)
+ : TestNodeMatcher(IrOpcode::kStringConcat),
+ length_matcher_(length_matcher),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ TestNodeMatcher::DescribeTo(os);
+ *os << " whose length (";
+ length_matcher_.DescribeTo(os);
+ *os << ") and lhs (";
+ lhs_matcher_.DescribeTo(os);
+ *os << ") and rhs (";
+ rhs_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "length", length_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), "rhs",
+ rhs_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> length_matcher_;
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+};
+
class IsUnopMatcher final : public TestNodeMatcher {
public:
IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
@@ -1910,9 +1947,19 @@ Matcher<Node*> IsTailCall(
IrOpcode::k##opcode, hint_matcher, lhs_matcher, rhs_matcher, \
effect_matcher, control_matcher)); \
}
-SPECULATIVE_BINOPS(DEFINE_SPECULATIVE_BINOP_MATCHER);
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DEFINE_SPECULATIVE_BINOP_MATCHER);
+DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberEqual)
+DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThan)
+DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThanOrEqual)
#undef DEFINE_SPECULATIVE_BINOP_MATCHER
+Matcher<Node*> IsStringConcat(const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return MakeMatcher(
+ new IsStringConcatMatcher(length_matcher, lhs_matcher, rhs_matcher));
+}
+
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 30ac330f7f..4e9c32e6d6 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -7,6 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/simplified-operator.h"
#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -35,16 +36,6 @@ class Node;
using ::testing::Matcher;
-#define SPECULATIVE_BINOPS(V) \
- V(SpeculativeNumberAdd) \
- V(SpeculativeNumberSubtract) \
- V(SpeculativeNumberShiftLeft) \
- V(SpeculativeNumberShiftRight) \
- V(SpeculativeNumberShiftRightLogical) \
- V(SpeculativeNumberBitwiseAnd) \
- V(SpeculativeNumberBitwiseOr) \
- V(SpeculativeNumberBitwiseXor)
-
Matcher<Node*> IsDead();
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher);
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
@@ -221,7 +212,10 @@ Matcher<Node*> IsNumberAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher, \
const Matcher<Node*>& effect_matcher, \
const Matcher<Node*>& control_matcher);
-SPECULATIVE_BINOPS(DECLARE_SPECULATIVE_BINOP_MATCHER);
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_SPECULATIVE_BINOP_MATCHER);
+DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberEqual)
+DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThan)
+DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThanOrEqual)
#undef DECLARE_SPECULATIVE_BINOP_MATCHER
Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
@@ -272,6 +266,9 @@ Matcher<Node*> IsNumberSqrt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTan(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTanh(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTrunc(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsStringConcat(const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsStringFromSingleCharCode(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStringLength(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
new file mode 100644
index 0000000000..f3ecd228a5
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -0,0 +1,1170 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/redundancy-elimination.h"
+#include "src/compiler/common-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::NiceMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace redundancy_elimination_unittest {
+
+class RedundancyEliminationTest : public GraphTest {
+ public:
+ explicit RedundancyEliminationTest(int num_parameters = 4)
+ : GraphTest(num_parameters),
+ reducer_(&editor_, zone()),
+ simplified_(zone()) {
+ // Initialize the {reducer_} state for the Start node.
+ reducer_.Reduce(graph()->start());
+
+ // Create a feedback vector with two CALL_IC slots.
+ FeedbackVectorSpec spec(zone());
+ FeedbackSlot slot1 = spec.AddCallICSlot();
+ FeedbackSlot slot2 = spec.AddCallICSlot();
+ Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
+ Handle<SharedFunctionInfo> shared =
+ isolate()->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate()->factory()->empty_string(), Builtins::kIllegal);
+ shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate(), shared);
+ vector_slot_pairs_.push_back(VectorSlotPair());
+ vector_slot_pairs_.push_back(
+ VectorSlotPair(feedback_vector, slot1, UNINITIALIZED));
+ vector_slot_pairs_.push_back(
+ VectorSlotPair(feedback_vector, slot2, UNINITIALIZED));
+ }
+ ~RedundancyEliminationTest() override = default;
+
+ protected:
+ Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
+
+ std::vector<VectorSlotPair> const& vector_slot_pairs() const {
+ return vector_slot_pairs_;
+ }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ NiceMock<MockAdvancedReducerEditor> editor_;
+ std::vector<VectorSlotPair> vector_slot_pairs_;
+ VectorSlotPair feedback2_;
+ RedundancyElimination reducer_;
+ SimplifiedOperatorBuilder simplified_;
+};
+
+namespace {
+
+const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
+ CheckForMinusZeroMode::kCheckForMinusZero,
+ CheckForMinusZeroMode::kDontCheckForMinusZero,
+};
+
+const CheckTaggedInputMode kCheckTaggedInputModes[] = {
+ CheckTaggedInputMode::kNumber, CheckTaggedInputMode::kNumberOrOddball};
+
+const NumberOperationHint kNumberOperationHints[] = {
+ NumberOperationHint::kSignedSmall,
+ NumberOperationHint::kSignedSmallInputs,
+ NumberOperationHint::kSigned32,
+ NumberOperationHint::kNumber,
+ NumberOperationHint::kNumberOrOddball,
+};
+
+} // namespace
+
+// -----------------------------------------------------------------------------
+// CheckBounds
+
+TEST_F(RedundancyEliminationTest, CheckBounds) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* index = Parameter(0);
+ Node* length = Parameter(1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), index, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), index, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckNumber
+
+TEST_F(RedundancyEliminationTest, CheckNumberSubsumedByCheckSmi) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckSmi(feedback1), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckNumber(feedback2), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckString
+
+TEST_F(RedundancyEliminationTest,
+ CheckStringSubsumedByCheckInternalizedString) {
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckInternalizedString(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckString(feedback), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckSymbol
+
+TEST_F(RedundancyEliminationTest, CheckSymbol) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckSymbol(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckSymbol(), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+}
+
+// -----------------------------------------------------------------------------
+// CheckedFloat64ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedFloat64ToInt32(mode, feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedFloat64ToInt32(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedInt32ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedInt32ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedInt32ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedInt64ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedInt64ToInt32(feedback1), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedInt64ToInt32(feedback2), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedInt64ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedInt64ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedInt64ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedSignedToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedSignedToInt32(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedSignedToInt32(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToFloat64
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToFloat64(mode, feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToFloat64(mode, feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ CheckedTaggedToFloat64SubsubmedByCheckedTaggedToFloat64) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ // If the check passed for CheckTaggedInputMode::kNumber, it'll
+ // also pass later for CheckTaggedInputMode::kNumberOrOddball.
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumber, feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumberOrOddball, feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt32(mode, feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt32(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ CheckedTaggedToInt32SubsumedByCheckedTaggedSignedToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedSignedToInt32(feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt32(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToTaggedPointer
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToTaggedPointer(feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToTaggedPointer(feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTruncateTaggedToWord32
+
+TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTruncateTaggedToWord32(mode, feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTruncateTaggedToWord32(mode, feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ CheckedTruncateTaggedToWord32SubsumedByCheckedTruncateTaggedToWord32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ // If the check passed for CheckTaggedInputMode::kNumber, it'll
+ // also pass later for CheckTaggedInputMode::kNumberOrOddball.
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumber, feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumberOrOddball, feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint32ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToInt32(feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToInt32(feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint32ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint64ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToInt32(feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToInt32(feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint64ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberEqual
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberEqualWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberEqual(NumberOperationHint::kSignedSmall,
+ check1, check2, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberEqualWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::UnsignedSmall(), 0);
+ Node* rhs = Parameter(Type::UnsignedSmall(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberEqual(NumberOperationHint::kSignedSmall,
+ lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberLessThan
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThan(NumberOperationHint::kSignedSmall,
+ check1, check2, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::UnsignedSmall(), 0);
+ Node* rhs = Parameter(Type::UnsignedSmall(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThan(NumberOperationHint::kSignedSmall,
+ lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberLessThanOrEqual
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall, check1, check2, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::UnsignedSmall(), 0);
+ Node* rhs = Parameter(Type::UnsignedSmall(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberAdd
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberAddWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect = graph()->NewNode(
+ simplified()->SpeculativeNumberAdd(hint), lhs, rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberAdd(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect = graph()->NewNode(
+ simplified()->SpeculativeNumberAdd(hint), lhs, rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberAdd(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberSubtract
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberSubtractWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberSubtract(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberSubtract(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberSubtractWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberSubtract(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberSubtract(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeSafeIntegerAdd
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerAdd(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerAdd(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerAdd(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerAdd(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeSafeIntegerSubtract
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerSubtract(hint),
+ lhs, rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerSubtract(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerSubtract(hint),
+ lhs, rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerSubtract(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeToNumber
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeToNumberWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* index = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckBounds(feedback1), index,
+ length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* to_number2 = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(hint, feedback2),
+ index, effect, control);
+ Reduction r2 = Reduce(to_number2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsSpeculativeToNumber(check1));
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* index = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckBounds(feedback1), index,
+ length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* to_number2 = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(hint, feedback2),
+ index, effect, control);
+ Reduction r2 = Reduce(to_number2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsSpeculativeToNumber(index));
+ }
+ }
+ }
+}
+
+} // namespace redundancy_elimination_unittest
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
index 97cafdb6e6..68a7ffea4a 100644
--- a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
@@ -18,7 +18,7 @@ namespace compiler {
class SchedulerRPOTest : public TestWithZone {
public:
- SchedulerRPOTest() {}
+ SchedulerRPOTest() = default;
void CheckRPONumbers(BasicBlockVector* order, size_t expected,
bool loops_allowed) {
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 51e954f799..82bcda6e9f 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -25,7 +25,7 @@ class SimplifiedLoweringTest : public GraphTest {
simplified_(zone()),
jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
&machine_) {}
- ~SimplifiedLoweringTest() override {}
+ ~SimplifiedLoweringTest() override = default;
void LowerGraph(Node* node) {
// Make sure we always start with an empty graph.
@@ -42,7 +42,7 @@ class SimplifiedLoweringTest : public GraphTest {
{
// Simplified lowering needs to run w/o the typer decorator so make sure
// the object is not live at the same time.
- Typer typer(isolate(), js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
typer.Run();
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 7913d6398c..5e2f8f15cc 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -25,7 +25,7 @@ class SimplifiedOperatorReducerTest : public GraphTest {
public:
explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
: GraphTest(num_parameters), simplified_(zone()) {}
- ~SimplifiedOperatorReducerTest() override {}
+ ~SimplifiedOperatorReducerTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -54,7 +54,7 @@ class SimplifiedOperatorReducerTestWithParam
public:
explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
: SimplifiedOperatorReducerTest(num_parameters) {}
- ~SimplifiedOperatorReducerTestWithParam() override {}
+ ~SimplifiedOperatorReducerTestWithParam() override = default;
};
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index 86600aeffe..51426a5f85 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -28,7 +28,7 @@ class TypedOptimizationTest : public TypedGraphTest {
public:
TypedOptimizationTest()
: TypedGraphTest(3), simplified_(zone()), deps_(isolate(), zone()) {}
- ~TypedOptimizationTest() override {}
+ ~TypedOptimizationTest() override = default;
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 53459c314a..b827088336 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -23,7 +23,7 @@ class TyperTest : public TypedGraphTest {
TyperTest()
: TypedGraphTest(3),
js_heap_broker_(isolate(), zone()),
- operation_typer_(isolate(), &js_heap_broker_, zone()),
+ operation_typer_(&js_heap_broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
simplified_(zone()) {
@@ -434,7 +434,6 @@ TEST_F(TyperTest, TypeJSStrictEqual) {
TEST_F(TyperTest, Monotonicity_##name) { \
TestUnaryMonotonicity(javascript_.name()); \
}
-TEST_MONOTONICITY(ToInteger)
TEST_MONOTONICITY(ToLength)
TEST_MONOTONICITY(ToName)
TEST_MONOTONICITY(ToNumber)
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/counters-unittest.cc
index d137d68ee9..c4d46b2e7a 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/counters-unittest.cc
@@ -34,7 +34,7 @@ class MockHistogram : public Histogram {
class AggregatedMemoryHistogramTest : public ::testing::Test {
public:
AggregatedMemoryHistogramTest() : aggregated_(&mock_) {}
- virtual ~AggregatedMemoryHistogramTest() {}
+ ~AggregatedMemoryHistogramTest() override = default;
void AddSample(double current_ms, double current_value) {
aggregated_.AddSample(current_ms, current_value);
@@ -66,7 +66,7 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
stats()->Reset();
}
- ~RuntimeCallStatsTest() {
+ ~RuntimeCallStatsTest() override {
// Disable RuntimeCallStats before tearing down the isolate to prevent
// printing the tests table. Comment the following line for debugging
// purposes.
diff --git a/deps/v8/test/unittests/heap/bitmap-unittest.cc b/deps/v8/test/unittests/heap/bitmap-unittest.cc
index a84437d534..1ecab4dd72 100644
--- a/deps/v8/test/unittests/heap/bitmap-unittest.cc
+++ b/deps/v8/test/unittests/heap/bitmap-unittest.cc
@@ -20,7 +20,7 @@ class BitmapTest : public ::testing::Test {
memset(memory_, 0, Bitmap::kSize);
}
- virtual ~BitmapTest() { delete[] memory_; }
+ ~BitmapTest() override { delete[] memory_; }
Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_); }
uint8_t* raw_bitmap() { return memory_; }
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index ac2cb3e2ee..33cc05e692 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -33,7 +33,6 @@ class MockEmbedderHeapTracer : public EmbedderHeapTracer {
public:
MOCK_METHOD0(TracePrologue, void());
MOCK_METHOD0(TraceEpilogue, void());
- MOCK_METHOD0(AbortTracing, void());
MOCK_METHOD1(EnterFinalPause, void(EmbedderHeapTracer::EmbedderStackState));
MOCK_METHOD0(IsTracingDone, bool());
MOCK_METHOD1(RegisterV8References,
@@ -76,24 +75,6 @@ TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) {
local_tracer.TraceEpilogue();
}
-TEST(LocalEmbedderHeapTracer, AbortTracingForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, AbortTracing());
- local_tracer.AbortTracing();
-}
-
-TEST(LocalEmbedderHeapTracer, AbortTracingClearsCachedWrappers) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.AddWrapperToTrace(CreateWrapperInfo());
- EXPECT_CALL(remote_tracer, AbortTracing());
- local_tracer.AbortTracing();
- EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
-}
-
TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 573be833af..7063b2a280 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -14,8 +14,8 @@ namespace {
class GCIdleTimeHandlerTest : public ::testing::Test {
public:
- GCIdleTimeHandlerTest() {}
- virtual ~GCIdleTimeHandlerTest() {}
+ GCIdleTimeHandlerTest() = default;
+ ~GCIdleTimeHandlerTest() override = default;
GCIdleTimeHandler* handler() { return &handler_; }
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 4ac80ab6fe..ac18e1817b 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -499,7 +499,7 @@ TEST_F(GCTracerTest, RecordMarkCompactHistograms) {
tracer->current_.scopes[GCTracer::Scope::MC_MARK] = 5;
tracer->current_.scopes[GCTracer::Scope::MC_PROLOGUE] = 6;
tracer->current_.scopes[GCTracer::Scope::MC_SWEEP] = 7;
- tracer->RecordMarkCompactHistograms(i_isolate()->counters()->gc_finalize());
+ tracer->RecordGCPhasesHistograms(i_isolate()->counters()->gc_finalize());
EXPECT_EQ(1, GcHistogram::Get("V8.GCFinalizeMC.Clear")->Total());
EXPECT_EQ(2, GcHistogram::Get("V8.GCFinalizeMC.Epilogue")->Total());
EXPECT_EQ(3, GcHistogram::Get("V8.GCFinalizeMC.Evacuate")->Total());
@@ -510,5 +510,19 @@ TEST_F(GCTracerTest, RecordMarkCompactHistograms) {
GcHistogram::CleanUp();
}
+TEST_F(GCTracerTest, RecordScavengerHistograms) {
+ if (FLAG_stress_incremental_marking) return;
+ isolate()->SetCreateHistogramFunction(&GcHistogram::CreateHistogram);
+ isolate()->SetAddHistogramSampleFunction(&GcHistogram::AddHistogramSample);
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+ tracer->current_.scopes[GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS] = 1;
+ tracer->current_.scopes[GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL] = 2;
+ tracer->RecordGCPhasesHistograms(i_isolate()->counters()->gc_scavenger());
+ EXPECT_EQ(1, GcHistogram::Get("V8.GCScavenger.ScavengeRoots")->Total());
+ EXPECT_EQ(2, GcHistogram::Get("V8.GCScavenger.ScavengeMain")->Total());
+ GcHistogram::CleanUp();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-controller-unittest.cc b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
index b2446afa84..42db9c4ba0 100644
--- a/deps/v8/test/unittests/heap/heap-controller-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
@@ -34,8 +34,8 @@ void CheckEqualRounded(double expected, double actual) {
TEST_F(HeapControllerTest, HeapGrowingFactor) {
HeapController heap_controller(i_isolate()->heap());
- double min_factor = heap_controller.kMinGrowingFactor;
- double max_factor = heap_controller.kMaxGrowingFactor;
+ double min_factor = heap_controller.min_growing_factor_;
+ double max_factor = heap_controller.max_growing_factor_;
CheckEqualRounded(max_factor, heap_controller.GrowingFactor(34, 1, 4.0));
CheckEqualRounded(3.553, heap_controller.GrowingFactor(45, 1, 4.0));
@@ -51,15 +51,15 @@ TEST_F(HeapControllerTest, HeapGrowingFactor) {
TEST_F(HeapControllerTest, MaxHeapGrowingFactor) {
HeapController heap_controller(i_isolate()->heap());
CheckEqualRounded(
- 1.3, heap_controller.MaxGrowingFactor(heap_controller.kMinSize * MB));
+ 1.3, heap_controller.MaxGrowingFactor(HeapController::kMinSize * MB));
CheckEqualRounded(1.600, heap_controller.MaxGrowingFactor(
- heap_controller.kMaxSize / 2 * MB));
+ HeapController::kMaxSize / 2 * MB));
CheckEqualRounded(
1.999, heap_controller.MaxGrowingFactor(
- (heap_controller.kMaxSize - Heap::kPointerMultiplier) * MB));
+ (HeapController::kMaxSize - Heap::kPointerMultiplier) * MB));
CheckEqualRounded(4.0,
heap_controller.MaxGrowingFactor(
- static_cast<size_t>(heap_controller.kMaxSize) * MB));
+ static_cast<size_t>(HeapController::kMaxSize) * MB));
}
TEST_F(HeapControllerTest, OldGenerationAllocationLimit) {
@@ -75,39 +75,43 @@ TEST_F(HeapControllerTest, OldGenerationAllocationLimit) {
double factor =
heap_controller.GrowingFactor(gc_speed, mutator_speed, max_factor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kDefault));
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ heap->heap_controller()->CalculateAllocationLimit(
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kDefault));
- factor = Min(factor, heap_controller.kConservativeGrowingFactor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kSlow));
-
- factor = Min(factor, heap_controller.kConservativeGrowingFactor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kConservative));
+ factor = Min(factor, heap_controller.conservative_growing_factor_);
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ heap->heap_controller()->CalculateAllocationLimit(
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kSlow));
- factor = heap_controller.kMinGrowingFactor;
+ factor = Min(factor, heap_controller.conservative_growing_factor_);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kMinimal));
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity,
+ Heap::HeapGrowingMode::kConservative));
+
+ factor = heap_controller.min_growing_factor_;
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ heap->heap_controller()->CalculateAllocationLimit(
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kMinimal));
}
TEST_F(HeapControllerTest, MaxOldGenerationSize) {
HeapController heap_controller(i_isolate()->heap());
uint64_t configurations[][2] = {
- {0, heap_controller.kMinSize},
- {512, heap_controller.kMinSize},
+ {0, HeapController::kMinSize},
+ {512, HeapController::kMinSize},
{1 * GB, 256 * Heap::kPointerMultiplier},
{2 * static_cast<uint64_t>(GB), 512 * Heap::kPointerMultiplier},
- {4 * static_cast<uint64_t>(GB), heap_controller.kMaxSize},
- {8 * static_cast<uint64_t>(GB), heap_controller.kMaxSize}};
+ {4 * static_cast<uint64_t>(GB), HeapController::kMaxSize},
+ {8 * static_cast<uint64_t>(GB), HeapController::kMaxSize}};
for (auto configuration : configurations) {
ASSERT_EQ(configuration[1],
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 3f08278d13..dd14e22d54 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -59,5 +59,17 @@ TEST_F(HeapTest, ASLR) {
#endif // V8_TARGET_ARCH_X64
}
+TEST_F(HeapTest, ExternalLimitDefault) {
+ Heap* heap = i_isolate()->heap();
+ EXPECT_EQ(kExternalAllocationSoftLimit, heap->external_memory_limit_);
+}
+
+TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
+ v8_isolate()->AdjustAmountOfExternalAllocatedMemory(+10 * MB);
+ v8_isolate()->AdjustAmountOfExternalAllocatedMemory(-10 * MB);
+ Heap* heap = i_isolate()->heap();
+ EXPECT_GE(heap->external_memory_limit_, kExternalAllocationSoftLimit);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index adeae2b593..36d99a31ba 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -151,7 +151,7 @@ class TaskForDifferentItems;
class BaseItem : public ItemParallelJob::Item {
public:
- virtual ~BaseItem() {}
+ ~BaseItem() override = default;
virtual void ProcessItem(TaskForDifferentItems* task) = 0;
};
@@ -162,7 +162,7 @@ class TaskForDifferentItems : public ItemParallelJob::Task {
: ItemParallelJob::Task(isolate),
processed_a_(processed_a),
processed_b_(processed_b) {}
- virtual ~TaskForDifferentItems() {}
+ ~TaskForDifferentItems() override = default;
void RunInParallel() override {
BaseItem* item = nullptr;
@@ -182,13 +182,13 @@ class TaskForDifferentItems : public ItemParallelJob::Task {
class ItemA : public BaseItem {
public:
- virtual ~ItemA() {}
+ ~ItemA() override = default;
void ProcessItem(TaskForDifferentItems* task) override { task->ProcessA(); }
};
class ItemB : public BaseItem {
public:
- virtual ~ItemB() {}
+ ~ItemB() override = default;
void ProcessItem(TaskForDifferentItems* task) override { task->ProcessB(); }
};
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index d81b7e1413..5266e54e09 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -16,11 +16,11 @@ typedef TestWithIsolate SpacesTest;
TEST_F(SpacesTest, CompactionSpaceMerge) {
Heap* heap = i_isolate()->heap();
OldSpace* old_space = heap->old_space();
- EXPECT_TRUE(old_space != NULL);
+ EXPECT_TRUE(old_space != nullptr);
CompactionSpace* compaction_space =
new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- EXPECT_TRUE(compaction_space != NULL);
+ EXPECT_TRUE(compaction_space != nullptr);
for (Page* p : *old_space) {
// Unlink free lists from the main space to avoid reusing the memory for
@@ -118,9 +118,9 @@ TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
TEST_F(SpacesTest, CodeRangeAddressReuse) {
CodeRangeAddressHint hint;
// Create code ranges.
- void* code_range1 = hint.GetAddressHint(100);
- void* code_range2 = hint.GetAddressHint(200);
- void* code_range3 = hint.GetAddressHint(100);
+ Address code_range1 = hint.GetAddressHint(100);
+ Address code_range2 = hint.GetAddressHint(200);
+ Address code_range3 = hint.GetAddressHint(100);
// Since the addresses are random, we cannot check that they are different.
@@ -129,14 +129,14 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
hint.NotifyFreedCodeRange(code_range2, 200);
// The next two code ranges should reuse the freed addresses.
- void* code_range4 = hint.GetAddressHint(100);
+ Address code_range4 = hint.GetAddressHint(100);
EXPECT_EQ(code_range4, code_range1);
- void* code_range5 = hint.GetAddressHint(200);
+ Address code_range5 = hint.GetAddressHint(200);
EXPECT_EQ(code_range5, code_range2);
// Free the third code range and check address reuse.
hint.NotifyFreedCodeRange(code_range3, 100);
- void* code_range6 = hint.GetAddressHint(100);
+ Address code_range6 = hint.GetAddressHint(100);
EXPECT_EQ(code_range6, code_range3);
}
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 5030d3897d..a2c8d94793 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -22,8 +22,8 @@ namespace interpreter {
class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
public:
- BytecodeArrayBuilderTest() {}
- ~BytecodeArrayBuilderTest() override {}
+ BytecodeArrayBuilderTest() = default;
+ ~BytecodeArrayBuilderTest() override = default;
};
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
@@ -134,9 +134,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit load / store property operations.
builder.LoadNamedProperty(reg, name, load_slot.ToInt())
+ .LoadNamedPropertyNoFeedback(reg, name)
.LoadKeyedProperty(reg, keyed_load_slot.ToInt())
.StoreNamedProperty(reg, name, sloppy_store_slot.ToInt(),
LanguageMode::kSloppy)
+ .StoreNamedPropertyNoFeedback(reg, name, LanguageMode::kStrict)
+ .StoreNamedPropertyNoFeedback(reg, name, LanguageMode::kSloppy)
.StoreKeyedProperty(reg, reg, sloppy_keyed_store_slot.ToInt(),
LanguageMode::kSloppy)
.StoreNamedProperty(reg, name, strict_store_slot.ToInt(),
@@ -194,7 +197,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CallRuntime(Runtime::kIsArray, reg)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg_list, pair)
.CallJSRuntime(Context::OBJECT_CREATE, reg_list)
- .CallWithSpread(reg, reg_list, 1);
+ .CallWithSpread(reg, reg_list, 1)
+ .CallNoFeedback(reg, reg_list);
// Emit binary operator invocations.
builder.BinaryOperation(Token::Value::ADD, reg, 1)
@@ -375,6 +379,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateRegExpLiteral(ast_factory.GetOneByteString("wide_literal"), 0, 0)
.CreateArrayLiteral(0, 0, 0)
.CreateEmptyArrayLiteral(0)
+ .CreateArrayFromIterable()
.CreateObjectLiteral(0, 0, 0, reg)
.CreateEmptyObjectLiteral()
.CloneObject(reg, 0, 0);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index f7c89e2869..69d0e96507 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -16,8 +16,8 @@ namespace interpreter {
class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
public:
- BytecodeArrayIteratorTest() {}
- ~BytecodeArrayIteratorTest() override {}
+ BytecodeArrayIteratorTest() = default;
+ ~BytecodeArrayIteratorTest() override = default;
};
TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 8d2cd4c501..71c79300f3 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -16,8 +16,8 @@ namespace interpreter {
class BytecodeArrayRandomIteratorTest : public TestWithIsolateAndZone {
public:
- BytecodeArrayRandomIteratorTest() {}
- ~BytecodeArrayRandomIteratorTest() override {}
+ BytecodeArrayRandomIteratorTest() = default;
+ ~BytecodeArrayRandomIteratorTest() override = default;
};
TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 5eb4d3be9a..7c01228936 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -34,7 +34,7 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
bytecode_array_writer_(
zone(), &constant_array_builder_,
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS) {}
- ~BytecodeArrayWriterUnittest() override {}
+ ~BytecodeArrayWriterUnittest() override = default;
void Write(Bytecode bytecode, BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0,
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index 018263f06b..eb4fdbb745 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -49,10 +49,10 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
3,
0,
" ForInPrepare r10-r12, [11]"},
- {{B(CallRuntime), U16(Runtime::FunctionId::kIsDate), R8(0), U8(0)},
+ {{B(CallRuntime), U16(Runtime::FunctionId::kIsSmi), R8(0), U8(0)},
5,
0,
- " CallRuntime [IsDate], r0-r0"},
+ " CallRuntime [IsSmi], r0-r0"},
{{B(Ldar),
static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
2,
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index b2c8b47c79..2ba28b2306 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -16,7 +16,7 @@ namespace interpreter {
class BytecodeRegisterAllocatorTest : public TestWithIsolateAndZone {
public:
BytecodeRegisterAllocatorTest() : allocator_(0) {}
- ~BytecodeRegisterAllocatorTest() override {}
+ ~BytecodeRegisterAllocatorTest() override = default;
BytecodeRegisterAllocator* allocator() { return &allocator_; }
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index 9e3ceb140f..9879b2a84a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -23,7 +23,7 @@ class BytecodeRegisterOptimizerTest
Register output;
};
- BytecodeRegisterOptimizerTest() {}
+ BytecodeRegisterOptimizerTest() = default;
~BytecodeRegisterOptimizerTest() override { delete register_allocator_; }
void Initialize(int number_of_parameters, int number_of_locals) {
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 59e228a29c..46bbb900c0 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -18,8 +18,8 @@ namespace interpreter {
class ConstantArrayBuilderTest : public TestWithIsolateAndZone {
public:
- ConstantArrayBuilderTest() {}
- ~ConstantArrayBuilderTest() override {}
+ ConstantArrayBuilderTest() = default;
+ ~ConstantArrayBuilderTest() override = default;
static const size_t k8BitCapacity = ConstantArrayBuilder::k8BitCapacity;
static const size_t k16BitCapacity = ConstantArrayBuilder::k16BitCapacity;
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 669db93040..cec661b468 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -28,8 +28,8 @@ class InterpreterAssemblerTestState : public compiler::CodeAssemblerState {
class InterpreterAssemblerTest : public TestWithIsolateAndZone {
public:
- InterpreterAssemblerTest() {}
- ~InterpreterAssemblerTest() override {}
+ InterpreterAssemblerTest() = default;
+ ~InterpreterAssemblerTest() override = default;
class InterpreterAssemblerForTest final : public InterpreterAssembler {
public:
diff --git a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
index 031eb9efbd..cb219a4737 100644
--- a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
@@ -17,13 +17,15 @@ namespace default_platform_unittest {
namespace {
struct MockTask : public Task {
- virtual ~MockTask() { Die(); }
+ // See issue v8:8185
+ ~MockTask() /* override */ { Die(); }
MOCK_METHOD0(Run, void());
MOCK_METHOD0(Die, void());
};
struct MockIdleTask : public IdleTask {
- virtual ~MockIdleTask() { Die(); }
+ // See issue v8:8185
+ ~MockIdleTask() /* override */ { Die(); }
MOCK_METHOD1(Run, void(double deadline_in_seconds));
MOCK_METHOD0(Die, void());
};
@@ -242,10 +244,10 @@ class TestBackgroundTask : public Task {
explicit TestBackgroundTask(base::Semaphore* sem, bool* executed)
: sem_(sem), executed_(executed) {}
- virtual ~TestBackgroundTask() { Die(); }
+ ~TestBackgroundTask() override { Die(); }
MOCK_METHOD0(Die, void());
- void Run() {
+ void Run() override {
*executed_ = true;
sem_->Signal();
}
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
index a42b37aa7c..0caad1ef22 100644
--- a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -17,7 +17,8 @@ namespace platform {
namespace {
struct MockTask : public Task {
- virtual ~MockTask() { Die(); }
+ // See issue v8:8185
+ ~MockTask() /* override */ { Die(); }
MOCK_METHOD0(Run, void());
MOCK_METHOD0(Die, void());
};
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index ad8d631961..505d76df8b 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -81,8 +81,8 @@ TEST(Object, StructListOrder) {
int last = current - 1;
ASSERT_LT(0, last);
InstanceType current_type = static_cast<InstanceType>(current);
-#define TEST_STRUCT(type, class, name) \
- current_type = InstanceType::type##_TYPE; \
+#define TEST_STRUCT(TYPE, class, name) \
+ current_type = InstanceType::TYPE; \
current = static_cast<int>(current_type); \
EXPECT_EQ(last + 1, current) \
<< " STRUCT_LIST is not ordered: " \
diff --git a/deps/v8/test/unittests/objects/microtask-queue-unittest.cc b/deps/v8/test/unittests/objects/microtask-queue-unittest.cc
new file mode 100644
index 0000000000..2b237ebc50
--- /dev/null
+++ b/deps/v8/test/unittests/objects/microtask-queue-unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/microtask-queue-inl.h"
+
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+
+void NoopCallback(void*) {}
+
+class MicrotaskQueueTest : public TestWithIsolate {
+ public:
+ Handle<Microtask> NewMicrotask() {
+ MicrotaskCallback callback = &NoopCallback;
+ void* data = nullptr;
+ return factory()->NewCallbackTask(
+ factory()->NewForeign(reinterpret_cast<Address>(callback)),
+ factory()->NewForeign(reinterpret_cast<Address>(data)));
+ }
+};
+
+TEST_F(MicrotaskQueueTest, EnqueueMicrotask) {
+ Handle<MicrotaskQueue> microtask_queue = factory()->NewMicrotaskQueue();
+ Handle<Microtask> microtask = NewMicrotask();
+
+ EXPECT_EQ(0, microtask_queue->pending_microtask_count());
+ MicrotaskQueue::EnqueueMicrotask(isolate(), microtask_queue, microtask);
+ EXPECT_EQ(1, microtask_queue->pending_microtask_count());
+ ASSERT_LE(1, microtask_queue->queue()->length());
+ EXPECT_EQ(*microtask, microtask_queue->queue()->get(0));
+
+ std::vector<Handle<Microtask>> microtasks;
+ microtasks.push_back(microtask);
+
+ // Queue microtasks until the reallocation happens.
+ int queue_capacity = microtask_queue->queue()->length();
+ for (int i = 0; i < queue_capacity; ++i) {
+ microtask = NewMicrotask();
+ MicrotaskQueue::EnqueueMicrotask(isolate(), microtask_queue, microtask);
+ microtasks.push_back(microtask);
+ }
+
+ int num_tasks = static_cast<int>(microtasks.size());
+ EXPECT_EQ(num_tasks, microtask_queue->pending_microtask_count());
+ ASSERT_LE(num_tasks, microtask_queue->queue()->length());
+ for (int i = 0; i < num_tasks; ++i) {
+ EXPECT_EQ(*microtasks[i], microtask_queue->queue()->get(i));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
index f20fbb2cee..ee5590e3f1 100644
--- a/deps/v8/test/unittests/parser/preparser-unittest.cc
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -13,7 +13,7 @@ namespace internal {
class PreParserTest : public TestWithNativeContext {
public:
- PreParserTest() {}
+ PreParserTest() = default;
private:
DISALLOW_COPY_AND_ASSIGN(PreParserTest);
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/register-configuration-unittest.cc
index 0688a5e54e..f0da8a5b93 100644
--- a/deps/v8/test/unittests/register-configuration-unittest.cc
+++ b/deps/v8/test/unittests/register-configuration-unittest.cc
@@ -14,8 +14,8 @@ const MachineRepresentation kSimd128 = MachineRepresentation::kSimd128;
class RegisterConfigurationUnitTest : public ::testing::Test {
public:
- RegisterConfigurationUnitTest() {}
- virtual ~RegisterConfigurationUnitTest() {}
+ RegisterConfigurationUnitTest() = default;
+ ~RegisterConfigurationUnitTest() override = default;
};
TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index f353e83ecf..712770e9dc 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -11,18 +11,18 @@ namespace {
class DefaultPlatformEnvironment final : public ::testing::Environment {
public:
- DefaultPlatformEnvironment() {}
+ DefaultPlatformEnvironment() = default;
void SetUp() override {
platform_ = v8::platform::NewDefaultPlatform(
0, v8::platform::IdleTaskSupport::kEnabled);
- ASSERT_TRUE(platform_.get() != NULL);
+ ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::InitializePlatform(platform_.get());
ASSERT_TRUE(v8::V8::Initialize());
}
void TearDown() override {
- ASSERT_TRUE(platform_.get() != NULL);
+ ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
}
diff --git a/deps/v8/test/unittests/source-position-table-unittest.cc b/deps/v8/test/unittests/source-position-table-unittest.cc
index 1ad6dec006..23fd1a95d2 100644
--- a/deps/v8/test/unittests/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/source-position-table-unittest.cc
@@ -14,8 +14,8 @@ namespace interpreter {
class SourcePositionTableTest : public TestWithIsolate {
public:
- SourcePositionTableTest() {}
- ~SourcePositionTableTest() override {}
+ SourcePositionTableTest() = default;
+ ~SourcePositionTableTest() override = default;
SourcePosition toPos(int offset) {
return SourcePosition(offset, offset % 10 - 1);
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index c771906dc2..1ff25337e4 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -6,10 +6,13 @@
#include "include/v8.h"
#include "src/api.h"
+#include "src/base/template-utils.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/scanner.h"
namespace v8 {
namespace internal {
@@ -17,13 +20,13 @@ namespace test {
Handle<String> CreateSource(Isolate* isolate,
ExternalOneByteString::Resource* maybe_resource) {
- static const char test_script[] = "(x) { x*x; }";
- if (maybe_resource) {
- return isolate->factory()
- ->NewExternalStringFromOneByte(maybe_resource)
- .ToHandleChecked();
+ if (!maybe_resource) {
+ static const char test_script[] = "(x) { x*x; }";
+ maybe_resource = new test::ScriptResource(test_script, strlen(test_script));
}
- return isolate->factory()->NewStringFromAsciiChecked(test_script);
+ return isolate->factory()
+ ->NewExternalStringFromOneByte(maybe_resource)
+ .ToHandleChecked();
}
Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
@@ -51,6 +54,23 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
return scope.CloseAndEscape(shared);
}
+std::unique_ptr<ParseInfo> OuterParseInfoForShared(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
+ Handle<Script> script =
+ Handle<Script>::cast(handle(shared->script(), isolate));
+ std::unique_ptr<ParseInfo> result =
+ base::make_unique<ParseInfo>(isolate, script);
+
+ // Create a character stream to simulate the parser having done so for the
+ // top-level ParseProgram.
+ Handle<String> source(String::cast(script->source()), isolate);
+ std::unique_ptr<Utf16CharacterStream> stream(
+ ScannerStream::For(isolate, source));
+ result->set_character_stream(std::move(stream));
+
+ return result;
+}
+
} // namespace test
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index 223b22e38e..fadc0c3e2b 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -46,6 +46,8 @@ Handle<String> CreateSource(
Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Isolate* isolate,
v8::String::ExternalOneByteStringResource* maybe_resource);
+std::unique_ptr<ParseInfo> OuterParseInfoForShared(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared);
} // namespace test
} // namespace internal
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 2b099e0ea5..32f405764d 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -24,32 +24,28 @@ Isolate* TestWithIsolate::isolate_ = nullptr;
TestWithIsolate::TestWithIsolate()
: isolate_scope_(isolate()), handle_scope_(isolate()) {}
-
-TestWithIsolate::~TestWithIsolate() {}
-
+TestWithIsolate::~TestWithIsolate() = default;
// static
void TestWithIsolate::SetUpTestCase() {
Test::SetUpTestCase();
- EXPECT_EQ(NULL, isolate_);
- // Make BigInt64Array / BigUint64Array available for testing.
- i::FLAG_harmony_bigint = true;
+ EXPECT_EQ(nullptr, isolate_);
v8::Isolate::CreateParams create_params;
array_buffer_allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
create_params.array_buffer_allocator = array_buffer_allocator_;
isolate_ = v8::Isolate::New(create_params);
- EXPECT_TRUE(isolate_ != NULL);
+ EXPECT_TRUE(isolate_ != nullptr);
}
// static
void TestWithIsolate::TearDownTestCase() {
- ASSERT_TRUE(isolate_ != NULL);
+ ASSERT_TRUE(isolate_ != nullptr);
v8::Platform* platform = internal::V8::GetCurrentPlatform();
- ASSERT_TRUE(platform != NULL);
+ ASSERT_TRUE(platform != nullptr);
while (platform::PumpMessageLoop(platform, isolate_)) continue;
isolate_->Dispose();
- isolate_ = NULL;
+ isolate_ = nullptr;
delete array_buffer_allocator_;
Test::TearDownTestCase();
}
@@ -64,10 +60,20 @@ Local<Value> TestWithIsolate::RunJS(const char* source) {
return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
}
+Local<Value> TestWithIsolate::RunJS(
+ String::ExternalOneByteStringResource* source) {
+ Local<Script> script =
+ v8::Script::Compile(
+ isolate()->GetCurrentContext(),
+ v8::String::NewExternalOneByte(isolate(), source).ToLocalChecked())
+ .ToLocalChecked();
+ return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
+}
+
TestWithContext::TestWithContext()
: context_(Context::New(isolate())), context_scope_(context_) {}
-TestWithContext::~TestWithContext() {}
+TestWithContext::~TestWithContext() = default;
v8::Local<v8::String> TestWithContext::NewString(const char* string) {
return v8::String::NewFromUtf8(v8_isolate(), string,
@@ -85,9 +91,9 @@ void TestWithContext::SetGlobalProperty(const char* name,
namespace internal {
-TestWithIsolate::~TestWithIsolate() {}
+TestWithIsolate::~TestWithIsolate() = default;
-TestWithIsolateAndZone::~TestWithIsolateAndZone() {}
+TestWithIsolateAndZone::~TestWithIsolateAndZone() = default;
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
@@ -95,13 +101,18 @@ Handle<Object> TestWithIsolate::RunJSInternal(const char* source) {
return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
}
+Handle<Object> TestWithIsolate::RunJSInternal(
+ ::v8::String::ExternalOneByteStringResource* source) {
+ return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
+}
+
base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
return isolate()->random_number_generator();
}
-TestWithZone::~TestWithZone() {}
+TestWithZone::~TestWithZone() = default;
-TestWithNativeContext::~TestWithNativeContext() {}
+TestWithNativeContext::~TestWithNativeContext() = default;
Handle<Context> TestWithNativeContext::native_context() const {
return isolate()->native_context();
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index c361810219..289ef5edf2 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -26,7 +26,7 @@ class ArrayBufferAllocator;
class TestWithIsolate : public virtual ::testing::Test {
public:
TestWithIsolate();
- virtual ~TestWithIsolate();
+ ~TestWithIsolate() override;
v8::Isolate* isolate() const { return v8_isolate(); }
@@ -37,6 +37,7 @@ class TestWithIsolate : public virtual ::testing::Test {
}
Local<Value> RunJS(const char* source);
+ Local<Value> RunJS(String::ExternalOneByteStringResource* source);
static void SetUpTestCase();
static void TearDownTestCase();
@@ -55,7 +56,7 @@ class TestWithIsolate : public virtual ::testing::Test {
class TestWithContext : public virtual v8::TestWithIsolate {
public:
TestWithContext();
- virtual ~TestWithContext();
+ ~TestWithContext() override;
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
@@ -78,8 +79,8 @@ class Factory;
class TestWithIsolate : public virtual ::v8::TestWithIsolate {
public:
- TestWithIsolate() {}
- virtual ~TestWithIsolate();
+ TestWithIsolate() = default;
+ ~TestWithIsolate() override;
Factory* factory() const;
Isolate* isolate() const { return i_isolate(); }
@@ -88,6 +89,13 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
return Handle<T>::cast(RunJSInternal(source));
}
Handle<Object> RunJSInternal(const char* source);
+ template <typename T = Object>
+ Handle<T> RunJS(::v8::String::ExternalOneByteStringResource* source) {
+ return Handle<T>::cast(RunJSInternal(source));
+ }
+ Handle<Object> RunJSInternal(
+ ::v8::String::ExternalOneByteStringResource* source);
+
base::RandomNumberGenerator* random_number_generator() const;
private:
@@ -97,7 +105,7 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
class TestWithZone : public virtual ::testing::Test {
public:
TestWithZone() : zone_(&allocator_, ZONE_NAME) {}
- virtual ~TestWithZone();
+ ~TestWithZone() override;
Zone* zone() { return &zone_; }
@@ -111,7 +119,7 @@ class TestWithZone : public virtual ::testing::Test {
class TestWithIsolateAndZone : public virtual TestWithIsolate {
public:
TestWithIsolateAndZone() : zone_(&allocator_, ZONE_NAME) {}
- virtual ~TestWithIsolateAndZone();
+ ~TestWithIsolateAndZone() override;
Zone* zone() { return &zone_; }
@@ -125,8 +133,8 @@ class TestWithIsolateAndZone : public virtual TestWithIsolate {
class TestWithNativeContext : public virtual ::v8::TestWithContext,
public virtual TestWithIsolate {
public:
- TestWithNativeContext() {}
- virtual ~TestWithNativeContext();
+ TestWithNativeContext() = default;
+ ~TestWithNativeContext() override;
Handle<Context> native_context() const;
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index d44d4b4e33..f0eef446d1 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -15,4 +15,9 @@
'RandomNumberGenerator.NextSampleSlowInvalidParam1': [SKIP],
'RandomNumberGenerator.NextSampleSlowInvalidParam2': [SKIP],
}], # 'system == macos and asan'
+
+['(arch == arm or arch == mips) and not simulator_run', {
+ # Uses too much memory.
+ 'Parameterized/WasmCodeManagerTest.GrowingVsFixedModule/Fixed': [SKIP]
+}], # '(arch == arm or arch == mips) and not simulator_run'
]
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index 77f609052a..2cc0bdc8a6 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -58,7 +58,7 @@ class ValueSerializerTest : public TestWithIsolate {
isolate_ = reinterpret_cast<i::Isolate*>(isolate());
}
- ~ValueSerializerTest() {
+ ~ValueSerializerTest() override {
// In some cases unhandled scheduled exceptions from current test produce
// that Context::New(isolate()) from next test's constructor returns NULL.
// In order to prevent that, we added destructor which will clear scheduled
@@ -228,7 +228,7 @@ class ValueSerializerTest : public TestWithIsolate {
Local<Script> script =
Script::Compile(deserialization_context_, source).ToLocalChecked();
Local<Value> value = script->Run(deserialization_context_).ToLocalChecked();
- EXPECT_TRUE(value->BooleanValue(deserialization_context_).FromJust());
+ EXPECT_TRUE(value->BooleanValue(isolate()));
}
Local<String> StringFromUtf8(const char* source) {
@@ -1870,6 +1870,22 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
}
+TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty1) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ DecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
+ 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x49, 0x02, 0x24, 0x01, 0x03}),
+ ".*LookupIterator::NOT_FOUND == it.state\\(\\).*");
+}
+
+TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty2) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ DecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
+ 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x6f, 0x7b, 0x00, 0x24, 0x01, 0x03}),
+ ".*LookupIterator::NOT_FOUND == it.state\\(\\).*");
+}
+
TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
// Byte offset out of range.
InvalidDecodeTest(
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 627a9da3ee..e2a7bcc388 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -674,7 +674,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
}
TEST_F(DecoderTest, FailOnNullData) {
- decoder.Reset(nullptr, 0);
+ decoder.Reset(nullptr, nullptr);
decoder.checkAvailable(1);
EXPECT_FALSE(decoder.ok());
EXPECT_FALSE(decoder.toResult(nullptr).ok());
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 771c61e237..31e4a12ae7 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -2403,34 +2403,29 @@ TEST_F(FunctionBodyDecoderTest, Throw) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
-
- builder.AddException(sigs.v_v());
- builder.AddException(sigs.v_i());
- AddLocals(kWasmI32, 1);
-
- EXPECT_VERIFIES(v_v, kExprThrow, 0);
-
- // exception index out of range.
- EXPECT_FAILURE(v_v, kExprThrow, 2);
-
- EXPECT_VERIFIES(v_v, WASM_I32V(0), kExprThrow, 1);
-
- // TODO(kschimpf): Add more tests.
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_i());
+ byte ex3 = builder.AddException(sigs.v_ii());
+ EXPECT_VERIFIES(v_v, kExprThrow, ex1);
+ EXPECT_VERIFIES(v_v, WASM_I32V(0), kExprThrow, ex2);
+ EXPECT_FAILURE(v_v, WASM_F32(0.0), kExprThrow, ex2);
+ EXPECT_VERIFIES(v_v, WASM_I32V(0), WASM_I32V(0), kExprThrow, ex3);
+ EXPECT_FAILURE(v_v, WASM_F32(0.0), WASM_I32V(0), kExprThrow, ex3);
+ EXPECT_FAILURE(v_v, kExprThrow, 99);
}
TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
- // TODO(titzer): unreachable code after throw should validate.
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
-
- builder.AddException(sigs.v_v());
- builder.AddException(sigs.v_i());
- AddLocals(kWasmI32, 1);
- EXPECT_VERIFIES(i_i, kExprThrow, 0, WASM_GET_LOCAL(0));
-
- // TODO(kschimpf): Add more (block-level) tests of unreachable to see
- // if they validate.
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_i());
+ EXPECT_VERIFIES(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_NOP);
+ EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_NOP);
+ EXPECT_VERIFIES(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_ZERO);
+ EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_ZERO);
+ EXPECT_FAILURE(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_F32(0.0));
+ EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_F32(0.0));
}
#define WASM_TRY_OP kExprTry, kLocalVoid
@@ -2438,24 +2433,30 @@ TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
TEST_F(FunctionBodyDecoderTest, TryCatch) {
WASM_FEATURE_SCOPE(eh);
-
TestModuleBuilder builder;
module = builder.module();
- builder.AddException(sigs.v_v());
- builder.AddException(sigs.v_v());
-
- // TODO(kschimpf): Need to fix catch to use declared exception.
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(0), kExprEnd);
-
- // Missing catch.
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd);
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_v());
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(ex1), kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd); // Missing catch.
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, WASM_CATCH(ex1)); // Missing end.
+ EXPECT_FAILURE(v_v, WASM_CATCH(ex1), kExprEnd); // Missing try.
- // Missing end.
- EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0));
+ // TODO(mstarzinger): Double catch. Fix this to verify.
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, WASM_CATCH(ex1), WASM_CATCH(ex2), kExprEnd);
+}
- // Double catch.
- // TODO(kschimpf): Fix this to verify.
- EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0), WASM_CATCH(1), kExprEnd);
+TEST_F(FunctionBodyDecoderTest, TryCatchAll) {
+ WASM_FEATURE_SCOPE(eh);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte ex1 = builder.AddException(sigs.v_v());
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatchAll, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(ex1), kExprCatchAll, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll, kExprCatchAll, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll, WASM_CATCH(ex1), kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll); // Missing end.
+ EXPECT_FAILURE(v_v, kExprCatchAll, kExprEnd); // Missing try.
}
#undef WASM_TRY_OP
@@ -3132,6 +3133,20 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kWasmI64, 212);
}
+TEST_F(LocalDeclDecoderTest, ExceptRef) {
+ WASM_FEATURE_SCOPE(eh);
+ ValueType type = kWasmExceptRef;
+ const byte data[] = {1, 1,
+ static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
+ BodyLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(&decls, data, data + sizeof(data));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(1u, decls.type_list.size());
+
+ TypesOfLocals map = decls.type_list;
+ EXPECT_EQ(type, map[0]);
+}
+
class BytecodeIteratorTest : public TestWithZone {};
TEST_F(BytecodeIteratorTest, SimpleForeach) {
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 3507f897f9..83876b3e0f 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -61,7 +61,7 @@ namespace module_decoder_unittest {
ModuleResult result = DecodeModule((data), (data) + sizeof((data))); \
EXPECT_FALSE(result.ok()); \
EXPECT_EQ(0u, result.val->exceptions.size()); \
- } while (0)
+ } while (false)
#define X1(...) __VA_ARGS__
#define X2(...) __VA_ARGS__, __VA_ARGS__
@@ -207,7 +207,7 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
}
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
- ModuleResult result = DecodeModule(nullptr, 0);
+ ModuleResult result = DecodeModule(nullptr, nullptr);
EXPECT_TRUE(result.ok());
}
@@ -476,11 +476,9 @@ TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
}
TEST_F(WasmModuleVerifyTest, OneI32Exception) {
- static const byte data[] = {
- SECTION_EXCEPTIONS(3), 1,
- // except[0] (i32)
- 1, kLocalI32,
- };
+ static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
+ // except[0] (i32)
+ 1, kLocalI32};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
@@ -525,6 +523,70 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_type) {
EXPECT_FALSE(result.ok());
}
+TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
+ static const byte data[] = {SECTION(Import, 1), 0, SECTION_EXCEPTIONS(1), 0,
+ SECTION(Export, 1), 0};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterExport) {
+ static const byte data[] = {SECTION(Export, 1), 0, SECTION_EXCEPTIONS(1), 0};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeImport) {
+ static const byte data[] = {SECTION_EXCEPTIONS(1), 0, SECTION(Import, 1), 0};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionImport) {
+ static const byte data[] = {SECTION(Import, 9), // section header
+ 1, // number of imports
+ NAME_LENGTH(1), // --
+ 'm', // module name
+ NAME_LENGTH(2), // --
+ 'e', 'x', // exception name
+ kExternalException, // import kind
+ // except[0] (i32)
+ 1, kLocalI32};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(1u, result.val->exceptions.size());
+ EXPECT_EQ(1u, result.val->import_table.size());
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionExport) {
+ static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
+ // except[0] (i32)
+ 1, kLocalI32, SECTION(Export, 4),
+ 1, // exports
+ NO_NAME, // --
+ kExternalException, // --
+ EXCEPTION_INDEX(0)};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(1u, result.val->exceptions.size());
+ EXPECT_EQ(1u, result.val->export_table.size());
+}
+
TEST_F(WasmModuleVerifyTest, OneSignature) {
{
static const byte data[] = {SIGNATURES_SECTION_VOID_VOID};
@@ -2068,7 +2130,7 @@ TEST_F(WasmModuleVerifyTest, Regression684855) {
class WasmInitExprDecodeTest : public TestWithZone {
public:
- WasmInitExprDecodeTest() {}
+ WasmInitExprDecodeTest() = default;
WasmFeatures enabled_features_;
@@ -2223,6 +2285,81 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
CheckSections(data, data + sizeof(data), expected, arraysize(expected));
}
+#define SRC_MAP \
+ 16, 's', 'o', 'u', 'r', 'c', 'e', 'M', 'a', 'p', 'p', 'i', 'n', 'g', 'U', \
+ 'R', 'L'
+TEST_F(WasmModuleVerifyTest, SourceMappingURLSection) {
+#define SRC 's', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c'
+ static const byte data[] = {SECTION(Unknown, 27), SRC_MAP, 9, SRC};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(9u, result.val->source_map_url.size());
+ const char src[] = {SRC};
+ EXPECT_EQ(
+ 0,
+ strncmp(reinterpret_cast<const char*>(result.val->source_map_url.data()),
+ src, 9));
+#undef SRC
+}
+
+TEST_F(WasmModuleVerifyTest, BadSourceMappingURLSection) {
+#define BAD_SRC 's', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c'
+ static const byte data[] = {SECTION(Unknown, 27), SRC_MAP, 9, BAD_SRC};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0u, result.val->source_map_url.size());
+#undef BAD_SRC
+}
+
+TEST_F(WasmModuleVerifyTest, MultipleSourceMappingURLSections) {
+#define SRC 'a', 'b', 'c'
+ static const byte data[] = {SECTION(Unknown, 21),
+ SRC_MAP,
+ 3,
+ SRC,
+ SECTION(Unknown, 21),
+ SRC_MAP,
+ 3,
+ 'p',
+ 'q',
+ 'r'};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(3u, result.val->source_map_url.size());
+ const char src[] = {SRC};
+ EXPECT_EQ(
+ 0,
+ strncmp(reinterpret_cast<const char*>(result.val->source_map_url.data()),
+ src, 3));
+#undef SRC
+}
+#undef SRC_MAP
+
+TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
+#define NAME_SECTION 4, 'n', 'a', 'm', 'e'
+ static const byte data[] = {SECTION(Unknown, 11),
+ NAME_SECTION,
+ 0,
+ 4,
+ 3,
+ 'a',
+ 'b',
+ 'c',
+ SECTION(Unknown, 12),
+ NAME_SECTION,
+ 0,
+ 5,
+ 4,
+ 'p',
+ 'q',
+ 'r',
+ 's'};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(3u, result.val->name.length());
+#undef NAME_SECTION
+}
+
#undef WASM_FEATURE_SCOPE
#undef WASM_FEATURE_SCOPE_VAL
#undef EXPECT_INIT_EXPR
diff --git a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
index 1b4ddf5bb0..07e3ca888d 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
@@ -23,7 +23,7 @@ void CrashOnPurpose() { *reinterpret_cast<volatile int*>(42); }
// on failures.
class SignalHandlerFallbackTest : public ::testing::Test {
protected:
- virtual void SetUp() {
+ void SetUp() override {
struct sigaction action;
action.sa_sigaction = SignalHandler;
sigemptyset(&action.sa_mask);
@@ -32,7 +32,7 @@ class SignalHandlerFallbackTest : public ::testing::Test {
sigaction(SIGBUS, &action, &old_bus_action_);
}
- virtual void TearDown() {
+ void TearDown() override {
// be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_segv_action_, nullptr);
sigaction(SIGBUS, &old_bus_action_, nullptr);
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index cc66f14d9c..5d695c8275 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -17,36 +17,34 @@ namespace wasm_heap_unittest {
class DisjointAllocationPoolTest : public ::testing::Test {
public:
- Address A(size_t n) { return static_cast<Address>(n); }
- void CheckLooksLike(const DisjointAllocationPool& mem,
- std::vector<std::pair<size_t, size_t>> expectation);
- void CheckLooksLike(AddressRange range,
- std::pair<size_t, size_t> expectation);
- DisjointAllocationPool Make(std::vector<std::pair<size_t, size_t>> model);
+ void CheckPool(const DisjointAllocationPool& mem,
+ std::initializer_list<base::AddressRegion> expected_regions);
+ void CheckRange(base::AddressRegion region1, base::AddressRegion region2);
+ DisjointAllocationPool Make(
+ std::initializer_list<base::AddressRegion> regions);
};
-void DisjointAllocationPoolTest::CheckLooksLike(
+void DisjointAllocationPoolTest::CheckPool(
const DisjointAllocationPool& mem,
- std::vector<std::pair<size_t, size_t>> expectation) {
- const auto& ranges = mem.ranges();
- CHECK_EQ(ranges.size(), expectation.size());
- auto iter = expectation.begin();
- for (auto it = ranges.begin(), e = ranges.end(); it != e; ++it, ++iter) {
- CheckLooksLike(*it, *iter);
+ std::initializer_list<base::AddressRegion> expected_regions) {
+ const auto& regions = mem.regions();
+ CHECK_EQ(regions.size(), expected_regions.size());
+ auto iter = expected_regions.begin();
+ for (auto it = regions.begin(), e = regions.end(); it != e; ++it, ++iter) {
+ CHECK_EQ(*it, *iter);
}
}
-void DisjointAllocationPoolTest::CheckLooksLike(
- AddressRange range, std::pair<size_t, size_t> expectation) {
- CHECK_EQ(range.start, A(expectation.first));
- CHECK_EQ(range.end, A(expectation.second));
+void DisjointAllocationPoolTest::CheckRange(base::AddressRegion region1,
+ base::AddressRegion region2) {
+ CHECK_EQ(region1, region2);
}
DisjointAllocationPool DisjointAllocationPoolTest::Make(
- std::vector<std::pair<size_t, size_t>> model) {
+ std::initializer_list<base::AddressRegion> regions) {
DisjointAllocationPool ret;
- for (auto& pair : model) {
- ret.Merge({A(pair.first), A(pair.second)});
+ for (auto& region : regions) {
+ ret.Merge(region);
}
return ret;
}
@@ -54,90 +52,90 @@ DisjointAllocationPool DisjointAllocationPoolTest::Make(
TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
DisjointAllocationPool a;
CHECK(a.IsEmpty());
- CheckLooksLike(a, {});
- a.Merge({1, 5});
- CheckLooksLike(a, {{1, 5}});
+ CheckPool(a, {});
+ a.Merge({1, 4});
+ CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, ConstructWithRange) {
- DisjointAllocationPool a({1, 5});
+ DisjointAllocationPool a({1, 4});
CHECK(!a.IsEmpty());
- CheckLooksLike(a, {{1, 5}});
+ CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
- DisjointAllocationPool a = Make({{1, 5}});
- AddressRange b = a.Allocate(2);
- CheckLooksLike(a, {{3, 5}});
- CheckLooksLike(b, {1, 3});
+ DisjointAllocationPool a = Make({{1, 4}});
+ base::AddressRegion b = a.Allocate(2);
+ CheckPool(a, {{3, 2}});
+ CheckRange(b, {1, 2});
a.Merge(b);
- CheckLooksLike(a, {{1, 5}});
- CHECK_EQ(a.ranges().size(), 1);
- CHECK_EQ(a.ranges().front().start, A(1));
- CHECK_EQ(a.ranges().front().end, A(5));
+ CheckPool(a, {{1, 4}});
+ CHECK_EQ(a.regions().size(), 1);
+ CHECK_EQ(a.regions().front().begin(), 1);
+ CHECK_EQ(a.regions().front().end(), 5);
}
TEST_F(DisjointAllocationPoolTest, ExtractAll) {
- DisjointAllocationPool a({A(1), A(5)});
- AddressRange b = a.Allocate(4);
- CheckLooksLike(b, {1, 5});
+ DisjointAllocationPool a({1, 4});
+ base::AddressRegion b = a.Allocate(4);
+ CheckRange(b, {1, 4});
CHECK(a.IsEmpty());
a.Merge(b);
- CheckLooksLike(a, {{1, 5}});
+ CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, FailToExtract) {
- DisjointAllocationPool a = Make({{1, 5}});
- AddressRange b = a.Allocate(5);
- CheckLooksLike(a, {{1, 5}});
+ DisjointAllocationPool a = Make({{1, 4}});
+ base::AddressRegion b = a.Allocate(5);
+ CheckPool(a, {{1, 4}});
CHECK(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
- DisjointAllocationPool a = Make({{1, 5}, {10, 14}});
- AddressRange b = a.Allocate(5);
- CheckLooksLike(a, {{1, 5}, {10, 14}});
+ DisjointAllocationPool a = Make({{1, 4}, {10, 4}});
+ base::AddressRegion b = a.Allocate(5);
+ CheckPool(a, {{1, 4}, {10, 4}});
CHECK(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, ExtractExact) {
- DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
- AddressRange b = a.Allocate(5);
- CheckLooksLike(a, {{1, 5}});
- CheckLooksLike(b, {10, 15});
+ DisjointAllocationPool a = Make({{1, 4}, {10, 5}});
+ base::AddressRegion b = a.Allocate(5);
+ CheckPool(a, {{1, 4}});
+ CheckRange(b, {10, 5});
}
TEST_F(DisjointAllocationPoolTest, Merging) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}});
- a.Merge({15, 20});
- CheckLooksLike(a, {{10, 25}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}});
+ a.Merge({15, 5});
+ CheckPool(a, {{10, 15}});
}
TEST_F(DisjointAllocationPoolTest, MergingMore) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({15, 20});
- a.Merge({25, 30});
- CheckLooksLike(a, {{10, 35}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({15, 5});
+ a.Merge({25, 5});
+ CheckPool(a, {{10, 25}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkip) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({25, 30});
- CheckLooksLike(a, {{10, 15}, {20, 35}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({25, 5});
+ CheckPool(a, {{10, 5}, {20, 15}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrc) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({25, 30});
- a.Merge({35, 40});
- CheckLooksLike(a, {{10, 15}, {20, 40}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({25, 5});
+ a.Merge({35, 5});
+ CheckPool(a, {{10, 5}, {20, 20}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({25, 30});
- a.Merge({36, 40});
- CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({25, 5});
+ a.Merge({36, 4});
+ CheckPool(a, {{10, 5}, {20, 15}, {36, 4}});
}
enum ModuleStyle : int { Fixed = 0, Growable = 1 };
@@ -200,7 +198,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode commit");
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
@@ -223,9 +221,12 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ // This fails in "reservation" if we cannot extend the code space, or in
+ // "commit" it we can (since we hit the allocation limit in the
+ // WasmCodeManager). Hence don't check for that part of the OOM message.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(native_module.get(), index++, 1 * kCodeAlignment),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode");
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
@@ -237,7 +238,7 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
CHECK_NOT_NULL(code);
ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * page() - kJumpTableSize),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode commit");
}
TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
@@ -264,7 +265,7 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
// grow.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode");
} else {
// The module grows by one page. One page remains uncommitted.
CHECK_NOT_NULL(
diff --git a/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
index 28b35793f7..807fc40959 100644
--- a/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
@@ -28,7 +28,7 @@ TEST_F(WasmModuleBuilderTest, Regression_647329) {
// Test crashed with asan.
ZoneBuffer buffer(zone());
const size_t kSize = ZoneBuffer::kInitialSize * 3 + 4096 + 100;
- byte data[kSize];
+ byte data[kSize] = {0};
buffer.write(data, kSize);
}