summaryrefslogtreecommitdiff
path: root/deps/v8/test/unittests
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/test/unittests')
-rw-r--r--deps/v8/test/unittests/BUILD.gn3
-rw-r--r--deps/v8/test/unittests/api/access-check-unittest.cc132
-rw-r--r--deps/v8/test/unittests/base/atomic-utils-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/platform/condition-variable-unittest.cc6
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/platform/semaphore-unittest.cc8
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc48
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc48
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc19
-rw-r--r--deps/v8/test/unittests/compiler/code-assembler-unittest.cc119
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc52
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc117
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc1
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h2
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc177
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc52
-rw-r--r--deps/v8/test/unittests/date/date-cache-unittest.cc16
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc26
-rw-r--r--deps/v8/test/unittests/heap/barrier-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc47
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc141
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc22
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc72
-rw-r--r--deps/v8/test/unittests/libplatform/task-queue-unittest.cc4
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc2
-rw-r--r--deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc20
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc7
-rw-r--r--deps/v8/test/unittests/unittests.status13
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc57
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc35
-rw-r--r--deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc224
46 files changed, 994 insertions, 608 deletions
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 87013f9fbc..7a379f77e8 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -224,6 +224,7 @@ v8_source_set("unittests_sources") {
"wasm/wasm-compiler-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
+ "wasm/wasm-module-sourcemap-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
"wasm/wasm-text-unittest.cc",
"zone/zone-allocator-unittest.cc",
@@ -301,8 +302,8 @@ v8_source_set("unittests_sources") {
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../../third_party/inspector_protocol:encoding_test",
"../../third_party/inspector_protocol:bindings_test",
+ "../../third_party/inspector_protocol:encoding_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index 65e20d2510..3b63666f4b 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -27,6 +27,12 @@ MaybeLocal<Value> CompileRun(Isolate* isolate, const char* source) {
return script->Run(context);
}
+v8::Local<v8::String> v8_str(const char* x) {
+ return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
+
} // namespace
TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
@@ -42,10 +48,8 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
Local<FunctionTemplate> setter_template = FunctionTemplate::New(
isolate(), [](const FunctionCallbackInfo<v8::Value>& info) { FAIL(); });
setter_template->SetAcceptAnyReceiver(false);
- global_template->SetAccessorProperty(
- String::NewFromUtf8(isolate(), "property", NewStringType::kNormal)
- .ToLocalChecked(),
- getter_template, setter_template);
+ global_template->SetAccessorProperty(v8_str("property"), getter_template,
+ setter_template);
Local<Context> target_context =
Context::New(isolate(), nullptr, global_template);
@@ -53,10 +57,7 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
Context::New(isolate(), nullptr, global_template);
accessing_context->Global()
- ->Set(accessing_context,
- String::NewFromUtf8(isolate(), "other", NewStringType::kNormal)
- .ToLocalChecked(),
- target_context->Global())
+ ->Set(accessing_context, v8_str("other"), target_context->Global())
.FromJust();
Context::Scope context_scope(accessing_context);
@@ -71,15 +72,118 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
" .set.call(other, 42);");
}
-namespace {
-bool failed_access_check_callback_called;
+class AccessRegressionTest : public AccessCheckTest {
+ protected:
+ i::Handle<i::JSFunction> RetrieveFunctionFrom(Local<Context> context,
+ const char* script) {
+ Context::Scope context_scope(context);
+ Local<Value> getter = CompileRun(isolate(), script).ToLocalChecked();
+ EXPECT_TRUE(getter->IsFunction());
+
+ i::Handle<i::JSReceiver> r =
+ Utils::OpenHandle(*Local<Function>::Cast(getter));
+ EXPECT_TRUE(r->IsJSFunction());
+ return i::Handle<i::JSFunction>::cast(r);
+ }
+};
-v8::Local<v8::String> v8_str(const char* x) {
- return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+TEST_F(AccessRegressionTest,
+ InstantiatedLazyAccessorPairsHaveCorrectNativeContext) {
+ // The setup creates two contexts and sets an object created
+ // in context 1 on the global of context 2.
+ // The object has an accessor pair {property}. Accessing the
+ // property descriptor of {property} causes instantiation of the
+ // accessor pair. The test checks that the access pair has the
+ // correct native context.
+ Local<FunctionTemplate> getter_template = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<Value>&) { FAIL(); });
+ Local<FunctionTemplate> setter_template = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<v8::Value>&) { FAIL(); });
+
+ Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
+ object_template->SetAccessorProperty(v8_str("property"), getter_template,
+ setter_template);
+
+ Local<Context> context1 = Context::New(isolate(), nullptr);
+ Local<Context> context2 = Context::New(isolate(), nullptr);
+
+ Local<Object> object =
+ object_template->NewInstance(context1).ToLocalChecked();
+ context2->Global()
+ ->Set(context2, v8_str("object_from_context1"), object)
+ .Check();
+
+ i::Handle<i::JSFunction> getter = RetrieveFunctionFrom(
+ context2,
+ "Object.getOwnPropertyDescriptor(object_from_context1, 'property').get");
+
+ ASSERT_EQ(getter->native_context(), *Utils::OpenHandle(*context1));
+}
+
+// Regression test for https://crbug.com/986063.
+TEST_F(AccessRegressionTest,
+ InstantiatedLazyAccessorPairsHaveCorrectNativeContextDebug) {
+ // The setup creates two contexts and installs an object "object"
+ // on the global this for each context.
+ // The object consists of:
+ // - an accessor pair "property".
+ // - a normal function "breakfn".
+ //
+ // The test sets a break point on {object.breakfn} in the first context.
+ // This forces instantation of the JSFunction for the {object.property}
+ // accessor pair. The test verifies afterwards that the respective
+ // JSFunction of the getter have the correct native context.
+
+ Local<FunctionTemplate> getter_template = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<Value>&) { FAIL(); });
+ Local<FunctionTemplate> setter_template = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<v8::Value>&) { FAIL(); });
+ Local<FunctionTemplate> break_template = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<v8::Value>&) { FAIL(); });
+
+ Local<Context> context1 = Context::New(isolate(), nullptr);
+ Local<Context> context2 = Context::New(isolate(), nullptr);
+
+ Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
+ object_template->Set(isolate(), "breakfn", break_template);
+ object_template->SetAccessorProperty(v8_str("property"), getter_template,
+ setter_template);
+
+ Local<Object> object1 =
+ object_template->NewInstance(context1).ToLocalChecked();
+ EXPECT_TRUE(
+ context1->Global()->Set(context1, v8_str("object"), object1).IsJust());
+
+ Local<Object> object2 =
+ object_template->NewInstance(context2).ToLocalChecked();
+ EXPECT_TRUE(
+ context2->Global()->Set(context2, v8_str("object"), object2).IsJust());
+
+ // Force instantiation of the JSFunction for the getter and setter
+ // of {object.property} by setting a break point on {object.breakfn}
+ {
+ Context::Scope context_scope(context1);
+ i::Isolate* iso = reinterpret_cast<i::Isolate*>(isolate());
+ i::Handle<i::JSFunction> break_fn =
+ RetrieveFunctionFrom(context1, "object.breakfn");
+
+ int id;
+ iso->debug()->SetBreakpointForFunction(i::handle(break_fn->shared(), iso),
+ iso->factory()->empty_string(), &id);
+ }
+
+ i::Handle<i::JSFunction> getter_c1 = RetrieveFunctionFrom(
+ context1, "Object.getOwnPropertyDescriptor(object, 'property').get");
+ i::Handle<i::JSFunction> getter_c2 = RetrieveFunctionFrom(
+ context2, "Object.getOwnPropertyDescriptor(object, 'property').get");
+
+ ASSERT_EQ(getter_c1->native_context(), *Utils::OpenHandle(*context1));
+ ASSERT_EQ(getter_c2->native_context(), *Utils::OpenHandle(*context2));
}
+namespace {
+bool failed_access_check_callback_called;
+
class AccessCheckTestConsoleDelegate : public debug::ConsoleDelegate {
public:
void Log(const debug::ConsoleCallArguments& args,
diff --git a/deps/v8/test/unittests/base/atomic-utils-unittest.cc b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
index 442257eff8..7ef0e948d7 100644
--- a/deps/v8/test/unittests/base/atomic-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
@@ -105,7 +105,7 @@ TEST(AsAtomic8, CompareAndSwap_Concurrent) {
}
}
for (int i = 0; i < kThreadCount; i++) {
- threads[i].Start();
+ CHECK(threads[i].Start());
}
for (int i = 0; i < kThreadCount; i++) {
@@ -179,7 +179,7 @@ TEST(AsAtomicWord, SetBits_Concurrent) {
threads[i].Initialize(&word, i * 2);
}
for (int i = 0; i < kThreadCount; i++) {
- threads[i].Start();
+ CHECK(threads[i].Start());
}
for (int i = 0; i < kThreadCount; i++) {
threads[i].Join();
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
index 6206569433..375f17ad2f 100644
--- a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -64,7 +64,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_FALSE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
- threads[n].Start();
+ CHECK(threads[n].Start());
// Wait for nth thread to start.
while (!threads[n].running_) {
threads[n].cv_.Wait(&threads[n].mutex_);
@@ -153,7 +153,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
for (int n = 0; n < kThreadCount; ++n) {
EXPECT_FALSE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
- threads[n].Start();
+ CHECK(threads[n].Start());
}
}
@@ -281,7 +281,7 @@ TEST(ConditionVariable, LoopIncrement) {
// Start all threads.
for (int n = thread_count - 1; n >= 0; --n) {
- threads[n]->Start();
+ CHECK(threads[n]->Start());
}
// Join and cleanup all threads.
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index d31d85447c..27154b3c24 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -79,7 +79,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
TEST_F(ThreadLocalStorageTest, DoTest) {
Run();
- Start();
+ CHECK(Start());
Join();
}
diff --git a/deps/v8/test/unittests/base/platform/semaphore-unittest.cc b/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
index bd4a00fe95..3cddc565c7 100644
--- a/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
@@ -94,8 +94,8 @@ TEST(Semaphore, ProducerConsumer) {
Semaphore used_space(0);
ProducerThread producer_thread(buffer, &free_space, &used_space);
ConsumerThread consumer_thread(buffer, &free_space, &used_space);
- producer_thread.Start();
- consumer_thread.Start();
+ CHECK(producer_thread.Start());
+ CHECK(consumer_thread.Start());
producer_thread.Join();
consumer_thread.Join();
}
@@ -106,8 +106,8 @@ TEST(Semaphore, WaitAndSignal) {
WaitAndSignalThread t1(&semaphore);
WaitAndSignalThread t2(&semaphore);
- t1.Start();
- t2.Start();
+ CHECK(t1.Start());
+ CHECK(t2.Start());
// Make something available.
semaphore.Signal();
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index 287a11442b..df387d3d94 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -14,6 +14,7 @@
using ::testing::_;
using v8::internal::compiler::Node;
+using v8::internal::compiler::TNode;
namespace c = v8::internal::compiler;
@@ -29,11 +30,11 @@ CodeStubAssemblerTestState::CodeStubAssemblerTestState(
TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
CodeStubAssemblerTestState state(this);
CodeStubAssemblerForTest m(&state);
- Node* value = m.Int32Constant(44);
+ TNode<IntPtrT> value = m.IntPtrConstant(44);
EXPECT_THAT(m.SmiTag(value),
IsBitcastWordToTaggedSigned(c::IsIntPtrConstant(
static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
- EXPECT_THAT(m.SmiUntag(value),
+ EXPECT_THAT(m.SmiUntag(m.ReinterpretCast<Smi>(value)),
c::IsIntPtrConstant(static_cast<intptr_t>(44) >>
(kSmiShiftSize + kSmiTagSize)));
}
@@ -42,9 +43,9 @@ TARGET_TEST_F(CodeStubAssemblerTest, IntPtrMax) {
CodeStubAssemblerTestState state(this);
CodeStubAssemblerForTest m(&state);
{
- Node* a = m.IntPtrConstant(100);
- Node* b = m.IntPtrConstant(1);
- Node* z = m.IntPtrMax(a, b);
+ TNode<IntPtrT> a = m.IntPtrConstant(100);
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<IntPtrT> z = m.IntPtrMax(a, b);
EXPECT_THAT(z, c::IsIntPtrConstant(100));
}
}
@@ -53,9 +54,9 @@ TARGET_TEST_F(CodeStubAssemblerTest, IntPtrMin) {
CodeStubAssemblerTestState state(this);
CodeStubAssemblerForTest m(&state);
{
- Node* a = m.IntPtrConstant(100);
- Node* b = m.IntPtrConstant(1);
- Node* z = m.IntPtrMin(a, b);
+ TNode<IntPtrT> a = m.IntPtrConstant(100);
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<IntPtrT> z = m.IntPtrMin(a, b);
EXPECT_THAT(z, c::IsIntPtrConstant(1));
}
}
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index bc74e6fe19..8b15811d36 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -92,7 +92,7 @@ class CompilerDispatcherTest : public TestWithNativeContext {
ast_node_factory.NewFunctionLiteral(
function_name, function_scope, statements, -1, -1, -1,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression,
+ FunctionSyntaxKind::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
shared->function_literal_id(), nullptr);
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index ae2e42b61f..ff59e79a60 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -27,8 +27,7 @@ namespace {
class BlockingCompilationJob : public OptimizedCompilationJob {
public:
BlockingCompilationJob(Isolate* isolate, Handle<JSFunction> function)
- : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
- "BlockingCompilationJob",
+ : OptimizedCompilationJob(&info_, "BlockingCompilationJob",
State::kReadyToExecute),
shared_(function->shared(), isolate),
zone_(isolate->allocator(), ZONE_NAME),
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index a26a8d9192..bb4848db6c 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -3225,54 +3225,6 @@ TEST_F(InstructionSelectorTest, Float64Neg) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-TEST_F(InstructionSelectorTest, StackCheck0) {
- StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit = m.Load(MachineType::Int32(), m.Parameter(0));
- Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
- RawMachineLabel if_true, if_false;
- m.Branch(interrupt, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
- EXPECT_EQ(kArmCmp, s[1]->arch_opcode());
- EXPECT_EQ(4U, s[1]->InputCount());
- EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
- StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit = m.Load(MachineType::Int32(), m.Parameter(0));
- Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
- RawMachineLabel if_true, if_false;
- m.Branch(sp_within_limit, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
- EXPECT_EQ(kArmCmp, s[1]->arch_opcode());
- EXPECT_EQ(4U, s[1]->InputCount());
- EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 867f89abfd..b969d9a278 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -4571,54 +4571,6 @@ TEST_F(InstructionSelectorTest, CompareFloat64HighGreaterThanOrEqualZero64) {
EXPECT_EQ(63, s.ToInt32(s[1]->InputAt(1)));
}
-TEST_F(InstructionSelectorTest, StackCheck0) {
- StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit = m.Load(MachineType::Int64(), m.Parameter(0));
- Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
- RawMachineLabel if_true, if_false;
- m.Branch(interrupt, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kArm64Ldr, s[0]->arch_opcode());
- EXPECT_EQ(kArm64Cmp, s[1]->arch_opcode());
- EXPECT_EQ(4U, s[1]->InputCount());
- EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
- StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit = m.Load(MachineType::Int64(), m.Parameter(0));
- Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
- RawMachineLabel if_true, if_false;
- m.Branch(sp_within_limit, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kArm64Ldr, s[0]->arch_opcode());
- EXPECT_EQ(kArm64Cmp, s[1]->arch_opcode());
- EXPECT_EQ(4U, s[1]->InputCount());
- EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
TEST_F(InstructionSelectorTest, ExternalReferenceLoad1) {
// Test offsets we can use kMode_Root for.
const int64_t kOffsets[] = {0, 1, 4, INT32_MIN, INT32_MAX};
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index a48ad1b359..c29979c600 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -25,7 +25,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSelector::Features features,
InstructionSelectorTest::StreamBuilderMode mode,
InstructionSelector::SourcePositionMode source_position_mode) {
- Schedule* schedule = Export();
+ Schedule* schedule = ExportForTest();
if (FLAG_trace_turbo) {
StdoutStream{} << "=== Schedule before instruction selection ==="
<< std::endl
@@ -40,11 +40,13 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
instruction_blocks);
SourcePositionTable source_position_table(graph());
TickCounter tick_counter;
+ size_t max_unoptimized_frame_height = 0;
InstructionSelector selector(
test_->zone(), node_count, &linkage, &sequence, schedule,
&source_position_table, nullptr,
InstructionSelector::kEnableSwitchJumpTable, &tick_counter,
- source_position_mode, features, InstructionSelector::kDisableScheduling,
+ &max_unoptimized_frame_height, source_position_mode, features,
+ InstructionSelector::kDisableScheduling,
InstructionSelector::kEnableRootsRelativeAddressing,
PoisoningMitigationLevel::kPoisonAll);
selector.SelectInstructions();
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index 34fb84957c..a231539f6f 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -39,7 +39,6 @@ class BranchEliminationTest : public GraphTest {
MachineOperatorBuilder machine_;
};
-
TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
// { return (x ? (x ? 1 : 2) : 3; }
// should be reduced to
@@ -80,7 +79,6 @@ TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
IsInt32Constant(2), IsMerge(outer_if_true, IsDead())));
}
-
TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
// { return (x ? 1 : (x ? 2 : 3); }
// should be reduced to
@@ -122,10 +120,9 @@ TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
IsInt32Constant(3), IsMerge(IsDead(), outer_if_false)));
}
-
TEST_F(BranchEliminationTest, BranchAfterDiamond) {
// { var y = x ? 1 : 2; return y + x ? 3 : 4; }
- // should not be reduced.
+ // second branch's condition should be replaced with a phi.
Node* condition = Parameter(0);
Node* branch1 =
@@ -136,7 +133,7 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
Node* phi1 =
graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
Int32Constant(1), Int32Constant(2), merge1);
-
+ // Second branch use the same condition.
Node* branch2 = graph()->NewNode(common()->Branch(), condition, merge1);
Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
@@ -145,7 +142,6 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
Int32Constant(3), Int32Constant(4), merge1);
-
Node* add = graph()->NewNode(machine()->Int32Add(), phi1, phi2);
Node* zero = graph()->NewNode(common()->Int32Constant(0));
Node* ret =
@@ -154,13 +150,13 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
Reduce();
- // Outer branch should not be rewritten, the inner branch condition should
- // be true.
- EXPECT_THAT(branch1, IsBranch(condition, graph()->start()));
- EXPECT_THAT(branch2, IsBranch(condition, merge1));
+ // The branch condition for branch2 should be a phi with constants.
+ EXPECT_THAT(branch2,
+ IsBranch(IsPhi(MachineRepresentation::kWord32, IsInt32Constant(1),
+ IsInt32Constant(0), merge1),
+ merge1));
}
-
TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
// if (x) while (x) { return 2; } else { return 1; }
// should be rewritten to
@@ -172,7 +168,6 @@ TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
graph()->NewNode(common()->Branch(), condition, graph()->start());
Node* outer_if_true = graph()->NewNode(common()->IfTrue(), outer_branch);
-
Node* loop = graph()->NewNode(common()->Loop(1), outer_if_true);
Node* effect =
graph()->NewNode(common()->EffectPhi(1), graph()->start(), loop);
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
index 0541f68440..43dfd9876f 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
@@ -13,6 +13,7 @@
#include "test/unittests/compiler/node-test-utils.h"
using ::testing::_;
+using ::testing::Eq;
namespace v8 {
namespace internal {
@@ -28,29 +29,29 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrAdd) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(1);
- Node* add = m.IntPtrAdd(a, b);
- EXPECT_THAT(add, IsIntPtrAdd(a, b));
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<WordT> add = m.IntPtrAdd(a, b);
+ EXPECT_THAT(add, IsIntPtrAdd(Eq(a), Eq(b)));
}
// x + 0 => x
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(0);
- Node* add = m.IntPtrAdd(a, b);
+ TNode<IntPtrT> b = m.IntPtrConstant(0);
+ TNode<WordT> add = m.IntPtrAdd(a, b);
EXPECT_THAT(add, a);
}
// 0 + x => x
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(0);
- Node* add = m.IntPtrAdd(b, a);
+ TNode<IntPtrT> b = m.IntPtrConstant(0);
+ TNode<WordT> add = m.IntPtrAdd(b, a);
EXPECT_THAT(add, a);
}
// CONST_a + CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(22);
- Node* b = m.IntPtrConstant(33);
- Node* c = m.IntPtrAdd(a, b);
+ TNode<IntPtrT> a = m.IntPtrConstant(22);
+ TNode<IntPtrT> b = m.IntPtrConstant(33);
+ TNode<IntPtrT> c = m.IntPtrAdd(a, b);
EXPECT_THAT(c, IsIntPtrConstant(55));
}
}
@@ -60,22 +61,22 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrSub) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(1);
- Node* sub = m.IntPtrSub(a, b);
- EXPECT_THAT(sub, IsIntPtrSub(a, b));
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<WordT> sub = m.IntPtrSub(a, b);
+ EXPECT_THAT(sub, IsIntPtrSub(Eq(a), Eq(b)));
}
// x - 0 => x
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(0);
- Node* c = m.IntPtrSub(a, b);
+ TNode<IntPtrT> b = m.IntPtrConstant(0);
+ TNode<WordT> c = m.IntPtrSub(a, b);
EXPECT_THAT(c, a);
}
// CONST_a - CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(100);
- Node* b = m.IntPtrConstant(1);
- Node* c = m.IntPtrSub(a, b);
+ TNode<IntPtrT> a = m.IntPtrConstant(100);
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<IntPtrT> c = m.IntPtrSub(a, b);
EXPECT_THAT(c, IsIntPtrConstant(99));
}
}
@@ -85,43 +86,43 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrMul) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(100);
- Node* mul = m.IntPtrMul(a, b);
- EXPECT_THAT(mul, IsIntPtrMul(a, b));
+ TNode<IntPtrT> b = m.IntPtrConstant(100);
+ TNode<WordT> mul = m.IntPtrMul(a, b);
+ EXPECT_THAT(mul, IsIntPtrMul(Eq(a), Eq(b)));
}
// x * 1 => x
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(1);
- Node* mul = m.IntPtrMul(a, b);
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<WordT> mul = m.IntPtrMul(a, b);
EXPECT_THAT(mul, a);
}
// 1 * x => x
{
Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(1);
- Node* mul = m.IntPtrMul(b, a);
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<WordT> mul = m.IntPtrMul(b, a);
EXPECT_THAT(mul, a);
}
// CONST_a * CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(100);
- Node* b = m.IntPtrConstant(5);
- Node* c = m.IntPtrMul(a, b);
+ TNode<IntPtrT> a = m.IntPtrConstant(100);
+ TNode<IntPtrT> b = m.IntPtrConstant(5);
+ TNode<IntPtrT> c = m.IntPtrMul(a, b);
EXPECT_THAT(c, IsIntPtrConstant(500));
}
// x * 2^CONST => x << CONST
{
Node* a = m.Parameter(0);
- Node* b = m.IntPtrConstant(1 << 3);
- Node* c = m.IntPtrMul(a, b);
+ TNode<IntPtrT> b = m.IntPtrConstant(1 << 3);
+ TNode<WordT> c = m.IntPtrMul(a, b);
EXPECT_THAT(c, IsWordShl(a, IsIntPtrConstant(3)));
}
// 2^CONST * x => x << CONST
{
- Node* a = m.IntPtrConstant(1 << 3);
+ TNode<IntPtrT> a = m.IntPtrConstant(1 << 3);
Node* b = m.Parameter(0);
- Node* c = m.IntPtrMul(a, b);
+ TNode<WordT> c = m.IntPtrMul(a, b);
EXPECT_THAT(c, IsWordShl(b, IsIntPtrConstant(3)));
}
}
@@ -169,19 +170,19 @@ TARGET_TEST_F(CodeAssemblerTest, WordShl) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* add = m.WordShl(a, 10);
+ TNode<WordT> add = m.WordShl(a, 10);
EXPECT_THAT(add, IsWordShl(a, IsIntPtrConstant(10)));
}
// x << 0 => x
{
Node* a = m.Parameter(0);
- Node* add = m.WordShl(a, 0);
+ TNode<WordT> add = m.WordShl(a, 0);
EXPECT_THAT(add, a);
}
// CONST_a << CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(1024);
- Node* shl = m.WordShl(a, 2);
+ TNode<IntPtrT> a = m.IntPtrConstant(1024);
+ TNode<WordT> shl = m.WordShl(a, 2);
EXPECT_THAT(shl, IsIntPtrConstant(4096));
}
}
@@ -191,25 +192,25 @@ TARGET_TEST_F(CodeAssemblerTest, WordShr) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* shr = m.WordShr(a, 10);
+ TNode<WordT> shr = m.WordShr(a, 10);
EXPECT_THAT(shr, IsWordShr(a, IsIntPtrConstant(10)));
}
// x >> 0 => x
{
Node* a = m.Parameter(0);
- Node* shr = m.WordShr(a, 0);
+ TNode<WordT> shr = m.WordShr(a, 0);
EXPECT_THAT(shr, a);
}
// +CONST_a >> CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(4096);
- Node* shr = m.WordShr(a, 2);
+ TNode<IntPtrT> a = m.IntPtrConstant(4096);
+ TNode<IntPtrT> shr = m.WordShr(a, 2);
EXPECT_THAT(shr, IsIntPtrConstant(1024));
}
// -CONST_a >> CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(-1234);
- Node* shr = m.WordShr(a, 2);
+ TNode<IntPtrT> a = m.IntPtrConstant(-1234);
+ TNode<IntPtrT> shr = m.WordShr(a, 2);
EXPECT_THAT(shr, IsIntPtrConstant(static_cast<uintptr_t>(-1234) >> 2));
}
}
@@ -219,25 +220,25 @@ TARGET_TEST_F(CodeAssemblerTest, WordSar) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* sar = m.WordSar(a, m.IntPtrConstant(10));
+ TNode<WordT> sar = m.WordSar(a, m.IntPtrConstant(10));
EXPECT_THAT(sar, IsWordSar(a, IsIntPtrConstant(10)));
}
// x >>> 0 => x
{
Node* a = m.Parameter(0);
- Node* sar = m.WordSar(a, m.IntPtrConstant(0));
+ TNode<WordT> sar = m.WordSar(a, m.IntPtrConstant(0));
EXPECT_THAT(sar, a);
}
// +CONST_a >>> CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(4096);
- Node* sar = m.WordSar(a, m.IntPtrConstant(2));
+ TNode<IntPtrT> a = m.IntPtrConstant(4096);
+ TNode<IntPtrT> sar = m.WordSar(a, m.IntPtrConstant(2));
EXPECT_THAT(sar, IsIntPtrConstant(1024));
}
// -CONST_a >>> CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(-1234);
- Node* sar = m.WordSar(a, m.IntPtrConstant(2));
+ TNode<IntPtrT> a = m.IntPtrConstant(-1234);
+ TNode<IntPtrT> sar = m.WordSar(a, m.IntPtrConstant(2));
EXPECT_THAT(sar, IsIntPtrConstant(static_cast<intptr_t>(-1234) >> 2));
}
}
@@ -247,25 +248,25 @@ TARGET_TEST_F(CodeAssemblerTest, WordOr) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* z = m.WordOr(a, m.IntPtrConstant(8));
+ TNode<WordT> z = m.WordOr(a, m.IntPtrConstant(8));
EXPECT_THAT(z, IsWordOr(a, IsIntPtrConstant(8)));
}
// x | 0 => x
{
Node* a = m.Parameter(0);
- Node* z = m.WordOr(a, m.IntPtrConstant(0));
+ TNode<WordT> z = m.WordOr(a, m.IntPtrConstant(0));
EXPECT_THAT(z, a);
}
// 0 | x => x
{
Node* a = m.Parameter(0);
- Node* z = m.WordOr(m.IntPtrConstant(0), a);
+ TNode<WordT> z = m.WordOr(m.IntPtrConstant(0), a);
EXPECT_THAT(z, a);
}
// CONST_a | CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(3);
- Node* b = m.WordOr(a, m.IntPtrConstant(7));
+ TNode<IntPtrT> a = m.IntPtrConstant(3);
+ TNode<WordT> b = m.WordOr(a, m.IntPtrConstant(7));
EXPECT_THAT(b, IsIntPtrConstant(7));
}
}
@@ -275,13 +276,13 @@ TARGET_TEST_F(CodeAssemblerTest, WordAnd) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* z = m.WordAnd(a, m.IntPtrConstant(8));
+ TNode<WordT> z = m.WordAnd(a, m.IntPtrConstant(8));
EXPECT_THAT(z, IsWordAnd(a, IsIntPtrConstant(8)));
}
// CONST_a & CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(3);
- Node* b = m.WordAnd(a, m.IntPtrConstant(7));
+ TNode<IntPtrT> a = m.IntPtrConstant(3);
+ TNode<IntPtrT> b = m.WordAnd(a, m.IntPtrConstant(7));
EXPECT_THAT(b, IsIntPtrConstant(3));
}
}
@@ -291,13 +292,13 @@ TARGET_TEST_F(CodeAssemblerTest, WordXor) {
CodeAssemblerForTest m(&state);
{
Node* a = m.Parameter(0);
- Node* z = m.WordXor(a, m.IntPtrConstant(8));
+ TNode<WordT> z = m.WordXor(a, m.IntPtrConstant(8));
EXPECT_THAT(z, IsWordXor(a, IsIntPtrConstant(8)));
}
// CONST_a ^ CONST_b => CONST_c
{
- Node* a = m.IntPtrConstant(3);
- Node* b = m.WordXor(a, m.IntPtrConstant(7));
+ TNode<IntPtrT> a = m.IntPtrConstant(3);
+ TNode<WordT> b = m.WordXor(a, m.IntPtrConstant(7));
EXPECT_THAT(b, IsIntPtrConstant(4));
}
}
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 485df8e401..ee6b7c02a3 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -23,7 +23,7 @@ GraphTest::GraphTest(int num_parameters)
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
- broker()->SetNativeContextRef();
+ broker()->SetTargetNativeContextRef(isolate()->native_context());
}
GraphTest::~GraphTest() = default;
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 8851a6a2df..ba6d3f299e 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -836,58 +836,6 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-TEST_F(InstructionSelectorTest, StackCheck0) {
- ExternalReference js_stack_limit =
- ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
- StreamBuilder m(this, MachineType::Int32());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit =
- m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
- Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
- RawMachineLabel if_true, if_false;
- m.Branch(interrupt, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kIA32Cmp, s[0]->arch_opcode());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
- ExternalReference js_stack_limit =
- ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
- StreamBuilder m(this, MachineType::Int32());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit =
- m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
- Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
- RawMachineLabel if_true, if_false;
- m.Branch(sp_within_limit, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kIA32StackCheck, s[0]->arch_opcode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 84d42b31d0..52769b09de 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/compiler/int64-lowering.h"
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/signature.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
@@ -10,7 +13,6 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/objects/objects-inl.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-module.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -48,6 +50,25 @@ class Int64LoweringTest : public GraphTest {
lowering.LowerGraph();
}
+ void LowerGraphWithSpecialCase(
+ Node* node, std::unique_ptr<Int64LoweringSpecialCase> special_case,
+ MachineRepresentation rep) {
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, node,
+ graph()->start(), graph()->start());
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+
+ // Create a signature for the outer wasm<>js call; for these tests we focus
+ // on lowering the special cases rather than the wrapper node at the
+ // JavaScript boundaries.
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
+ sig_builder.AddReturn(rep);
+
+ Int64Lowering lowering(graph(), machine(), common(), zone(),
+ sig_builder.Build(), std::move(special_case));
+ lowering.LowerGraph();
+ }
+
void LowerGraph(Node* node, MachineRepresentation return_type,
MachineRepresentation rep = MachineRepresentation::kWord32,
int num_params = 0) {
@@ -968,6 +989,100 @@ TEST_F(Int64LoweringTest, LoopCycle) {
LowerGraph(load, MachineRepresentation::kWord64);
}
+
+TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
+ Node* target = Int32Constant(1);
+ Node* context = Int32Constant(2);
+ Node* bigint = Int32Constant(4);
+
+ CallDescriptor* bigint_to_i64_call_descriptor =
+ Linkage::GetStubCallDescriptor(
+ zone(), // zone
+ BigIntToI64Descriptor(), // descriptor
+ BigIntToI64Descriptor()
+ .GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ StubCallMode::kCallCodeObject); // stub call mode
+
+ CallDescriptor* bigint_to_i32_pair_call_descriptor =
+ Linkage::GetStubCallDescriptor(
+ zone(), // zone
+ BigIntToI32PairDescriptor(), // descriptor
+ BigIntToI32PairDescriptor()
+ .GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ StubCallMode::kCallCodeObject); // stub call mode
+
+ auto lowering_special_case = base::make_unique<Int64LoweringSpecialCase>();
+ lowering_special_case->bigint_to_i64_call_descriptor =
+ bigint_to_i64_call_descriptor;
+ lowering_special_case->bigint_to_i32_pair_call_descriptor =
+ bigint_to_i32_pair_call_descriptor;
+
+ Node* call_node =
+ graph()->NewNode(common()->Call(bigint_to_i64_call_descriptor), target,
+ bigint, context, start(), start());
+
+ LowerGraphWithSpecialCase(call_node, std::move(lowering_special_case),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> call;
+ Matcher<Node*> call_matcher =
+ IsCall(bigint_to_i32_pair_call_descriptor, target, bigint, context,
+ start(), start());
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&call), call_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&call), call_matcher)),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
+ Node* target = Int32Constant(1);
+ Node* i64 = Int64Constant(value(0));
+
+ CallDescriptor* i64_to_bigint_call_descriptor =
+ Linkage::GetStubCallDescriptor(
+ zone(), // zone
+ I64ToBigIntDescriptor(), // descriptor
+ I64ToBigIntDescriptor()
+ .GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ StubCallMode::kCallCodeObject); // stub call mode
+
+ CallDescriptor* i32_pair_to_bigint_call_descriptor =
+ Linkage::GetStubCallDescriptor(
+ zone(), // zone
+ I32PairToBigIntDescriptor(), // descriptor
+ I32PairToBigIntDescriptor()
+ .GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ StubCallMode::kCallCodeObject); // stub call mode
+
+ auto lowering_special_case = base::make_unique<Int64LoweringSpecialCase>();
+ lowering_special_case->i64_to_bigint_call_descriptor =
+ i64_to_bigint_call_descriptor;
+ lowering_special_case->i32_pair_to_bigint_call_descriptor =
+ i32_pair_to_bigint_call_descriptor;
+
+ Node* call = graph()->NewNode(common()->Call(i64_to_bigint_call_descriptor),
+ target, i64, start(), start());
+
+ LowerGraphWithSpecialCase(call, std::move(lowering_special_case),
+ MachineRepresentation::kTaggedPointer);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(IsCall(i32_pair_to_bigint_call_descriptor, target,
+ IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(high_word_value(0)), start(), start()),
+ start(), start()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 3d4e16ac68..7c062698c4 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -6,6 +6,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/feedback-source.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/simplified-operator.h"
@@ -23,7 +24,6 @@ class JSCallReducerTest : public TypedGraphTest {
public:
JSCallReducerTest()
: TypedGraphTest(3), javascript_(zone()), deps_(broker(), zone()) {
- broker()->SerializeStandardObjects();
}
~JSCallReducerTest() override = default;
@@ -113,7 +113,7 @@ class JSCallReducerTest : public TypedGraphTest {
ClosureFeedbackCellArray::New(isolate(), shared);
Handle<FeedbackVector> vector =
FeedbackVector::New(isolate(), shared, closure_feedback_cell_array);
- VectorSlotPair feedback(vector, FeedbackSlot(0), UNINITIALIZED);
+ FeedbackSource feedback(vector, FeedbackSlot(0));
return javascript()->Call(arity, CallFrequency(), feedback,
ConvertReceiverMode::kAny,
SpeculationMode::kAllowSpeculation);
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 95c03e543f..fb5254903d 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -172,7 +172,8 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
- Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
+ Handle<ScopeInfo> scope_info =
+ ReadOnlyRoots(isolate()).empty_function_scope_info_handle();
Node* const object = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
@@ -192,7 +193,8 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
// JSCreateCatchContext
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
- Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
+ Handle<ScopeInfo> scope_info =
+ ReadOnlyRoots(isolate()).empty_function_scope_info_handle();
Node* const exception = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 3510cd4b74..5b4088f28e 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -37,7 +37,7 @@ class JSIntrinsicLoweringTest : public GraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph(), tick_counter());
- JSIntrinsicLowering reducer(&graph_reducer, &jsgraph);
+ JSIntrinsicLowering reducer(&graph_reducer, &jsgraph, broker());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 0d7bb946e3..0d85253847 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -322,12 +322,13 @@ TEST_F(JSTypedLoweringTest, JSLoadContext) {
Reduction const r2 = Reduce(graph()->NewNode(
javascript()->LoadContext(1, index, immutable), context, effect));
ASSERT_TRUE(r2.Changed());
- EXPECT_THAT(r2.replacement(),
- IsLoadField(AccessBuilder::ForContextSlot(index),
- IsLoadField(AccessBuilder::ForContextSlot(
- Context::PREVIOUS_INDEX),
- context, effect, graph()->start()),
- _, graph()->start()));
+ EXPECT_THAT(
+ r2.replacement(),
+ IsLoadField(AccessBuilder::ForContextSlot(index),
+ IsLoadField(AccessBuilder::ForContextSlotKnownPointer(
+ Context::PREVIOUS_INDEX),
+ context, effect, graph()->start()),
+ _, graph()->start()));
}
}
}
@@ -357,12 +358,13 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
Reduce(graph()->NewNode(javascript()->StoreContext(1, index), value,
context, effect, control));
ASSERT_TRUE(r2.Changed());
- EXPECT_THAT(r2.replacement(),
- IsStoreField(AccessBuilder::ForContextSlot(index),
- IsLoadField(AccessBuilder::ForContextSlot(
- Context::PREVIOUS_INDEX),
- context, effect, graph()->start()),
- value, _, control));
+ EXPECT_THAT(
+ r2.replacement(),
+ IsStoreField(AccessBuilder::ForContextSlot(index),
+ IsLoadField(AccessBuilder::ForContextSlotKnownPointer(
+ Context::PREVIOUS_INDEX),
+ context, effect, graph()->start()),
+ value, _, control));
}
}
}
@@ -373,7 +375,7 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
- VectorSlotPair feedback;
+ FeedbackSource feedback;
Handle<Name> name = factory()->length_string();
Node* const receiver = Parameter(Type::String(), 0);
Node* const context = UndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 17dc998f6d..d86771a8c3 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -55,8 +55,8 @@ TEST_F(LinkageTailCall, EmptyToEmpty) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc->CanTailCall(node));
const CallDescriptor* callee = CallDescriptorOf(node->op());
+ EXPECT_TRUE(desc->CanTailCall(callee));
int stack_param_delta = callee->GetStackParameterDelta(desc);
EXPECT_EQ(0, stack_param_delta);
}
@@ -74,7 +74,7 @@ TEST_F(LinkageTailCall, SameReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -94,7 +94,7 @@ TEST_F(LinkageTailCall, DifferingReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(!desc1->CanTailCall(node));
+ EXPECT_FALSE(desc1->CanTailCall(CallDescriptorOf(node->op())));
}
@@ -113,7 +113,7 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -134,7 +134,7 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -155,7 +155,7 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
// We might need to add one slot of padding to the callee arguments.
int expected = kPadArguments ? 2 : 1;
@@ -178,7 +178,7 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
// We might need to drop one slot of padding from the caller's arguments.
int expected = kPadArguments ? -2 : -1;
@@ -206,7 +206,7 @@ TEST_F(LinkageTailCall, MatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -232,7 +232,7 @@ TEST_F(LinkageTailCall, NonMatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -259,7 +259,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -287,7 +287,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -315,7 +315,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
// We might need to add one slot of padding to the callee arguments.
int expected = kPadArguments ? 0 : -1;
@@ -345,7 +345,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
// We might need to drop one slot of padding from the caller's arguments.
int expected = kPadArguments ? 0 : 1;
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 24fc6a31c7..d0acbf341c 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -245,7 +245,7 @@ const PureOperator kPureOperators[] = {
PURE(Float64Equal, 2, 0, 1), // --
PURE(Float64LessThan, 2, 0, 1), // --
PURE(Float64LessThanOrEqual, 2, 0, 1), // --
- PURE(LoadStackPointer, 0, 0, 1), // --
+ PURE(StackPointerGreaterThan, 1, 0, 1), // --
PURE(Float64ExtractLowWord32, 1, 0, 1), // --
PURE(Float64ExtractHighWord32, 1, 0, 1), // --
PURE(Float64InsertLowWord32, 2, 0, 1), // --
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index fc6f1d5500..6fa4ce0cf0 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -2204,6 +2204,7 @@ IS_UNOP_MATCHER(ChangeInt32ToInt64)
IS_UNOP_MATCHER(ChangeUint32ToFloat64)
IS_UNOP_MATCHER(ChangeUint32ToUint64)
IS_UNOP_MATCHER(ChangeCompressedToTagged)
+IS_UNOP_MATCHER(ChangeCompressedPointerToTaggedPointer)
IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float32Abs)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index be8d67cb35..a71aff913b 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -427,6 +427,8 @@ Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeCompressedToTagged(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeCompressedPointerToTaggedPointer(
+ const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 76fbc4a368..9655fc70b2 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -5,6 +5,7 @@
#include "src/compiler/redundancy-elimination.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/feedback-source.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -40,26 +41,24 @@ class RedundancyEliminationTest : public GraphTest {
ClosureFeedbackCellArray::New(isolate(), shared);
Handle<FeedbackVector> feedback_vector =
FeedbackVector::New(isolate(), shared, closure_feedback_cell_array);
- vector_slot_pairs_.push_back(VectorSlotPair());
- vector_slot_pairs_.push_back(
- VectorSlotPair(feedback_vector, slot1, UNINITIALIZED));
- vector_slot_pairs_.push_back(
- VectorSlotPair(feedback_vector, slot2, UNINITIALIZED));
+ vector_slot_pairs_.push_back(FeedbackSource());
+ vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot1));
+ vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot2));
}
~RedundancyEliminationTest() override = default;
protected:
Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
- std::vector<VectorSlotPair> const& vector_slot_pairs() const {
+ std::vector<FeedbackSource> const& vector_slot_pairs() const {
return vector_slot_pairs_;
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
private:
NiceMock<MockAdvancedReducerEditor> editor_;
- std::vector<VectorSlotPair> vector_slot_pairs_;
- VectorSlotPair feedback2_;
+ std::vector<FeedbackSource> vector_slot_pairs_;
+ FeedbackSource feedback2_;
RedundancyElimination reducer_;
SimplifiedOperatorBuilder simplified_;
};
@@ -88,8 +87,8 @@ const NumberOperationHint kNumberOperationHints[] = {
// CheckBounds
TEST_F(RedundancyEliminationTest, CheckBounds) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* index = Parameter(0);
Node* length = Parameter(1);
Node* effect = graph()->start();
@@ -114,8 +113,8 @@ TEST_F(RedundancyEliminationTest, CheckBounds) {
// CheckNumber
TEST_F(RedundancyEliminationTest, CheckNumberSubsumedByCheckSmi) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -201,7 +200,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
CheckStringSubsumedByCheckInternalizedString) {
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -245,8 +244,8 @@ TEST_F(RedundancyEliminationTest, CheckSymbol) {
// CheckedFloat64ToInt32
TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -274,8 +273,8 @@ TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
// CheckedFloat64ToInt64
TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt64) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -306,8 +305,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt32ToCompressedSigned) {
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -333,8 +332,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt32ToCompressedSigned) {
// CheckedInt32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -360,8 +359,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
// CheckedInt64ToInt32
TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -385,8 +384,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
// CheckedInt64ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -412,8 +411,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
// CheckedTaggedSignedToInt32
TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -439,8 +438,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
// CheckedTaggedToFloat64
TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -466,8 +465,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
TEST_F(RedundancyEliminationTest,
CheckedTaggedToFloat64SubsubmedByCheckedTaggedToFloat64) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -497,8 +496,8 @@ TEST_F(RedundancyEliminationTest,
// CheckedTaggedToInt32
TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -524,8 +523,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
TEST_F(RedundancyEliminationTest,
CheckedTaggedToInt32SubsumedByCheckedTaggedSignedToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -553,8 +552,8 @@ TEST_F(RedundancyEliminationTest,
// CheckedTaggedToInt64
TEST_F(RedundancyEliminationTest, CheckedTaggedToInt64) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -582,8 +581,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToInt64) {
// CheckedTaggedToTaggedPointer
TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -609,8 +608,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
// CheckedTaggedToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -636,8 +635,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
// CheckedCompressedToTaggedPointer
TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedPointer) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -663,8 +662,8 @@ TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedPointer) {
// CheckedCompressedToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -690,8 +689,8 @@ TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedSigned) {
// CheckedTaggedToCompressedPointer
TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedPointer) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -717,8 +716,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedPointer) {
// CheckedTaggedToCompressedSigned
TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -744,8 +743,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedSigned) {
// CheckedTruncateTaggedToWord32
TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
@@ -771,8 +770,8 @@ TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
TEST_F(RedundancyEliminationTest,
CheckedTruncateTaggedToWord32SubsumedByCheckedTruncateTaggedToWord32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -802,8 +801,8 @@ TEST_F(RedundancyEliminationTest,
// CheckedUint32Bounds
TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* index = Parameter(0);
Node* length = Parameter(1);
Node* effect = graph()->start();
@@ -832,8 +831,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
// CheckedUint32ToInt32
TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -859,8 +858,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
// CheckedUint32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -886,8 +885,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
// CheckedUint64Bounds
TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* index = Parameter(0);
Node* length = Parameter(1);
Node* effect = graph()->start();
@@ -914,8 +913,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
// CheckedUint64ToInt32
TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -941,8 +940,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
// CheckedUint64ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -970,8 +969,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1006,8 +1005,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
Node* rhs = Parameter(Type::UnsignedSmall(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1045,8 +1044,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1081,8 +1080,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
Node* rhs = Parameter(Type::UnsignedSmall(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1120,8 +1119,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1156,8 +1155,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
Node* rhs = Parameter(Type::UnsignedSmall(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1195,7 +1194,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberAddWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
@@ -1221,7 +1220,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
@@ -1251,7 +1250,7 @@ TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
@@ -1279,7 +1278,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
@@ -1310,7 +1309,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
@@ -1338,7 +1337,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
@@ -1369,7 +1368,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
@@ -1397,7 +1396,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
@@ -1428,8 +1427,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeToNumberWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* index = Parameter(Type::Any(), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
@@ -1456,8 +1455,8 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
- TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
- TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* index = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index b198592ddd..e2d4f080f5 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -360,7 +360,7 @@ TEST_F(SimplifiedOperatorReducerTest, CheckedFloat64ToInt32WithConstant) {
TRACED_FOREACH(int32_t, n, kInt32Values) {
Reduction r = Reduce(graph()->NewNode(
simplified()->CheckedFloat64ToInt32(
- CheckForMinusZeroMode::kDontCheckForMinusZero, VectorSlotPair()),
+ CheckForMinusZeroMode::kDontCheckForMinusZero, FeedbackSource()),
Float64Constant(n), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(n));
@@ -418,7 +418,7 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithChangeInt31ToTaggedSigned) {
Node* value =
graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
+ simplified()->CheckSmi(FeedbackSource()), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
@@ -428,7 +428,7 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithNumberConstant) {
Node* control = graph()->start();
Node* value = NumberConstant(1.0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
+ simplified()->CheckSmi(FeedbackSource()), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
@@ -438,9 +438,9 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithCheckSmi) {
Node* effect = graph()->start();
Node* control = graph()->start();
Node* value = effect = graph()->NewNode(
- simplified()->CheckSmi(VectorSlotPair()), param0, effect, control);
+ simplified()->CheckSmi(FeedbackSource()), param0, effect, control);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
+ simplified()->CheckSmi(FeedbackSource()), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index ec68993213..23e4dbe5ae 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -463,7 +463,7 @@ TEST_MONOTONICITY(Add)
#undef TEST_MONOTONICITY
TEST_F(TyperTest, Monotonicity_InstanceOf) {
- TestBinaryMonotonicity(javascript_.InstanceOf(VectorSlotPair()));
+ TestBinaryMonotonicity(javascript_.InstanceOf(FeedbackSource()));
}
// JS BINOPS without hint
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index f8e3e26aa9..a62e824600 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1718,58 +1718,6 @@ TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
}
}
-TEST_F(InstructionSelectorTest, StackCheck0) {
- ExternalReference js_stack_limit =
- ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
- StreamBuilder m(this, MachineType::Int32());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit =
- m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
- Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
- RawMachineLabel if_true, if_false;
- m.Branch(interrupt, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64Cmp, s[0]->arch_opcode());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
- ExternalReference js_stack_limit =
- ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
- StreamBuilder m(this, MachineType::Int32());
- Node* const sp = m.LoadStackPointer();
- Node* const stack_limit =
- m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
- Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
- RawMachineLabel if_true, if_false;
- m.Branch(sp_within_limit, &if_true, &if_false);
-
- m.Bind(&if_true);
- m.Return(m.Int32Constant(1));
-
- m.Bind(&if_false);
- m.Return(m.Int32Constant(0));
-
- Stream s = m.Build();
-
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64StackCheck, s[0]->arch_opcode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/date/date-cache-unittest.cc b/deps/v8/test/unittests/date/date-cache-unittest.cc
index 2ceaaebabb..2c252424e7 100644
--- a/deps/v8/test/unittests/date/date-cache-unittest.cc
+++ b/deps/v8/test/unittests/date/date-cache-unittest.cc
@@ -69,12 +69,12 @@ TEST(DateCache, AdoptDefaultFirst) {
// We finish all the operation AdoptDefaultThread before
// running all other thread so it won't show the problem of
// AdoptDefault trashing newly create default.
- t1.Start();
+ CHECK(t1.Start());
t1.Join();
- t2.Start();
- t3.Start();
- t4.Start();
+ CHECK(t2.Start());
+ CHECK(t3.Start());
+ CHECK(t4.Start());
t2.Join();
t3.Join();
@@ -92,10 +92,10 @@ TEST(DateCache, AdoptDefaultMixed) {
// it will cause crash in other thread because the TimeZone
// newly created by createDefault could be trashed by AdoptDefault
// while a deleted DEFAULT_ZONE got cloned.
- t1.Start();
- t2.Start();
- t3.Start();
- t4.Start();
+ CHECK(t1.Start());
+ CHECK(t2.Start());
+ CHECK(t3.Start());
+ CHECK(t4.Start());
t1.Join();
t2.Join();
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 37b037147b..6cb9df0895 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -377,32 +377,6 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_Run) {
}
}
-TEST_F(MicrotaskQueueTest, DetachGlobal_FinalizationGroup) {
- // Enqueue an FinalizationGroupCleanupTask.
- Handle<JSArray> ran = RunJS<JSArray>(
- "var ran = [false];"
- "var wf = new FinalizationGroup(() => { ran[0] = true; });"
- "(function() { wf.register({}, {}); })();"
- "gc();"
- "ran");
-
- EXPECT_TRUE(
- Object::GetElement(isolate(), ran, 0).ToHandleChecked()->IsFalse());
- EXPECT_EQ(1, microtask_queue()->size());
-
- // Detach MicrotaskQueue from the current context.
- context()->DetachGlobal();
-
- microtask_queue()->RunMicrotasks(isolate());
-
- // RunMicrotasks processes the pending Microtask, but Microtasks that are
- // associated to a detached context should be cancelled and should not take
- // effect.
- EXPECT_EQ(0, microtask_queue()->size());
- EXPECT_TRUE(
- Object::GetElement(isolate(), ran, 0).ToHandleChecked()->IsFalse());
-}
-
namespace {
void DummyPromiseHook(PromiseHookType type, Local<Promise> promise,
diff --git a/deps/v8/test/unittests/heap/barrier-unittest.cc b/deps/v8/test/unittests/heap/barrier-unittest.cc
index 07906b20c1..99cf5d8978 100644
--- a/deps/v8/test/unittests/heap/barrier-unittest.cc
+++ b/deps/v8/test/unittests/heap/barrier-unittest.cc
@@ -57,7 +57,7 @@ TEST(OneshotBarrier, DoneAfterWait_Concurrent) {
barrier.Start();
}
for (int i = 0; i < kThreadCount; i++) {
- threads[i].Start();
+ CHECK(threads[i].Start());
}
for (int i = 0; i < kThreadCount; i++) {
threads[i].Join();
@@ -80,7 +80,7 @@ TEST(OneshotBarrier, EarlyFinish_Concurrent) {
barrier.Start();
}
for (int i = 0; i < kThreadCount; i++) {
- threads[i].Start();
+ CHECK(threads[i].Start());
}
for (int i = 0; i < kThreadCount; i++) {
threads[i].Join();
@@ -133,7 +133,7 @@ TEST(OneshotBarrier, Processing_Concurrent) {
barrier.Start();
barrier.Start();
EXPECT_FALSE(barrier.DoneForTesting());
- counting_thread.Start();
+ CHECK(counting_thread.Start());
for (size_t i = 0; i < kWorkCounter; i++) {
{
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 53b919a860..742e86c357 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -355,11 +355,9 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
"collector unittest");
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10,
- nullptr);
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1,
- nullptr);
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1);
tracer->Stop(SCAVENGER);
EXPECT_DOUBLE_EQ(
11, tracer->current_
@@ -372,20 +370,19 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
tracer->Start(MINOR_MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector unittest");
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 10, nullptr);
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 10);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 1, nullptr);
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 1);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 20,
- nullptr);
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 20);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2, nullptr);
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2);
tracer->AddBackgroundScopeSample(
GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
- 30, nullptr);
+ 30);
tracer->AddBackgroundScopeSample(
GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
- 3, nullptr);
+ 3);
tracer->Stop(MINOR_MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
11,
@@ -402,33 +399,31 @@ TEST_F(GCTracerTest, BackgroundMajorMCScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 100, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 100);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 200, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 200);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 10, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 10);
// Scavenger should not affect the major mark-compact scopes.
tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
"collector unittest");
tracer->Stop(SCAVENGER);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 20, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 20);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 1, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 1);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 2, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 2);
tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector unittest");
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 30, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 30);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 3, nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 3);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40,
- nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40);
tracer->AddBackgroundScopeSample(
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4,
- nullptr);
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4);
tracer->Stop(MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
111, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_MARKING]);
@@ -448,7 +443,7 @@ class ThreadWithBackgroundScope final : public base::Thread {
: Thread(Options("ThreadWithBackgroundScope")), tracer_(tracer) {}
void Run() override {
GCTracer::BackgroundScope scope(
- tracer_, GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
+ tracer_, GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, nullptr);
}
private:
@@ -460,8 +455,8 @@ TEST_F(GCTracerTest, MultithreadedBackgroundScope) {
ThreadWithBackgroundScope thread1(tracer);
ThreadWithBackgroundScope thread2(tracer);
tracer->ResetForTesting();
- thread1.Start();
- thread2.Start();
+ CHECK(thread1.Start());
+ CHECK(thread2.Start());
tracer->FetchBackgroundMarkCompactCounters();
thread1.Join();
thread2.Join();
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 140d3d45b3..c5acc6c43e 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -143,5 +143,146 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
EXPECT_EQ(code_range6, code_range3);
}
+// Tests that FreeListMany::SelectFreeListCategoryType returns what it should.
+TEST_F(SpacesTest, FreeListManySelectFreeListCategoryType) {
+ FreeListMany free_list;
+
+ // Testing that all sizes below 256 bytes get assigned the correct category
+ for (size_t size = 0; size <= FreeListMany::kPreciseCategoryMaxSize; size++) {
+ FreeListCategoryType cat = free_list.SelectFreeListCategoryType(size);
+ if (cat == 0) {
+ // If cat == 0, then we make sure that |size| doesn't fit in the 2nd
+ // category.
+ EXPECT_LT(size, free_list.categories_min[1]);
+ } else {
+ // Otherwise, size should fit in |cat|, but not in |cat+1|.
+ EXPECT_LE(free_list.categories_min[cat], size);
+ EXPECT_LT(size, free_list.categories_min[cat + 1]);
+ }
+ }
+
+  // Testing every size above 256 would take a long time, so test only some
+ // "interesting cases": picking some number in the middle of the categories,
+ // as well as at the categories' bounds.
+ for (int cat = kFirstCategory + 1; cat <= free_list.last_category_; cat++) {
+ std::vector<size_t> sizes;
+ // Adding size less than this category's minimum
+ sizes.push_back(free_list.categories_min[cat] - 8);
+ // Adding size equal to this category's minimum
+ sizes.push_back(free_list.categories_min[cat]);
+ // Adding size greater than this category's minimum
+ sizes.push_back(free_list.categories_min[cat] + 8);
+ // Adding size between this category's minimum and the next category
+ if (cat != free_list.last_category_) {
+ sizes.push_back(
+ (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
+ 2);
+ }
+
+ for (size_t size : sizes) {
+ FreeListCategoryType cat = free_list.SelectFreeListCategoryType(size);
+ if (cat == free_list.last_category_) {
+        // If cat == last_category, then we make sure that |size| indeed fits
+ // in the last category.
+ EXPECT_LE(free_list.categories_min[cat], size);
+ } else {
+ // Otherwise, size should fit in |cat|, but not in |cat+1|.
+ EXPECT_LE(free_list.categories_min[cat], size);
+ EXPECT_LT(size, free_list.categories_min[cat + 1]);
+ }
+ }
+ }
+}
+
+// Tests that FreeListMany::GuaranteedAllocatable returns what it should.
+TEST_F(SpacesTest, FreeListManyGuaranteedAllocatable) {
+ FreeListMany free_list;
+
+ for (int cat = kFirstCategory; cat < free_list.last_category_; cat++) {
+ std::vector<size_t> sizes;
+ // Adding size less than this category's minimum
+ sizes.push_back(free_list.categories_min[cat] - 8);
+ // Adding size equal to this category's minimum
+ sizes.push_back(free_list.categories_min[cat]);
+ // Adding size greater than this category's minimum
+ sizes.push_back(free_list.categories_min[cat] + 8);
+ if (cat != free_list.last_category_) {
+ // Adding size between this category's minimum and the next category
+ sizes.push_back(
+ (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
+ 2);
+ }
+
+ for (size_t size : sizes) {
+ FreeListCategoryType cat_free =
+ free_list.SelectFreeListCategoryType(size);
+ size_t guaranteed_allocatable = free_list.GuaranteedAllocatable(size);
+ if (cat_free == free_list.last_category_) {
+ // If |cat_free| == last_category, then guaranteed_allocatable must
+ // return the last category, because when allocating, the last category
+ // is searched entirely.
+ EXPECT_EQ(free_list.SelectFreeListCategoryType(guaranteed_allocatable),
+ free_list.last_category_);
+ } else if (size < free_list.categories_min[0]) {
+ // If size < free_list.categories_min[0], then the bytes are wasted, and
+ // guaranteed_allocatable should return 0.
+ EXPECT_EQ(guaranteed_allocatable, 0ul);
+ } else {
+ // Otherwise, |guaranteed_allocatable| is equal to the minimum of
+ // |size|'s category (|cat_free|);
+ EXPECT_EQ(free_list.categories_min[cat_free], guaranteed_allocatable);
+ }
+ }
+ }
+}
+
+// Tests that
+// FreeListManyCachedFastPath::SelectFastAllocationFreeListCategoryType returns
+// what it should.
+TEST_F(SpacesTest,
+ FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType) {
+ FreeListManyCachedFastPath free_list;
+
+ for (int cat = kFirstCategory; cat <= free_list.last_category_; cat++) {
+ std::vector<size_t> sizes;
+ // Adding size less than this category's minimum
+ sizes.push_back(free_list.categories_min[cat] - 8);
+ // Adding size equal to this category's minimum
+ sizes.push_back(free_list.categories_min[cat]);
+ // Adding size greater than this category's minimum
+ sizes.push_back(free_list.categories_min[cat] + 8);
+ // Adding size between this category's minimum and the next category
+ if (cat != free_list.last_category_) {
+ sizes.push_back(
+ (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
+ 2);
+ }
+
+ for (size_t size : sizes) {
+ FreeListCategoryType cat =
+ free_list.SelectFastAllocationFreeListCategoryType(size);
+ if (size <= FreeListManyCachedFastPath::kTinyObjectMaxSize) {
+ // For tiny objects, the first category of the fast path should be
+ // chosen.
+ EXPECT_TRUE(cat == FreeListManyCachedFastPath::kFastPathFirstCategory);
+ } else if (size >= free_list.categories_min[free_list.last_category_] -
+ FreeListManyCachedFastPath::kFastPathOffset) {
+ // For objects close to the minimum of the last category, the last
+ // category is chosen.
+ EXPECT_EQ(cat, free_list.last_category_);
+ } else {
+ // For other objects, the chosen category must satisfy that its minimum
+ // is at least |size|+1.85k.
+ EXPECT_GE(free_list.categories_min[cat],
+ size + FreeListManyCachedFastPath::kFastPathOffset);
+        // And the smaller category's minimum is less than |size|+1.85k
+ // (otherwise it would have been chosen instead).
+ EXPECT_LT(free_list.categories_min[cat - 1],
+ size + FreeListManyCachedFastPath::kFastPathOffset);
+ }
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 3d02db7413..a9c631f8d2 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -151,6 +151,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreNamedOwnProperty(reg, name, store_own_slot.ToInt())
.StoreInArrayLiteral(reg, reg, store_array_element_slot.ToInt());
+ // Emit Iterator-protocol operations
+ builder.GetIterator(reg, load_slot.ToInt());
+
// Emit load / store lookup slots.
builder.LoadLookupSlot(name, TypeofMode::NOT_INSIDE_TYPEOF)
.LoadLookupSlot(name, TypeofMode::INSIDE_TYPEOF)
@@ -283,7 +286,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
BytecodeLoopHeader loop_header;
BytecodeLabel after_jump1, after_jump2, after_jump3, after_jump4,
after_jump5, after_jump6, after_jump7, after_jump8, after_jump9,
- after_jump10, after_loop;
+ after_jump10, after_jump11, after_loop;
builder.JumpIfNull(&after_loop)
.Bind(&loop_header)
.Jump(&after_jump1)
@@ -296,21 +299,23 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.Bind(&after_jump4)
.JumpIfNotUndefined(&after_jump5)
.Bind(&after_jump5)
- .JumpIfJSReceiver(&after_jump6)
+ .JumpIfUndefinedOrNull(&after_jump6)
.Bind(&after_jump6)
- .JumpIfTrue(ToBooleanMode::kConvertToBoolean, &after_jump7)
+ .JumpIfJSReceiver(&after_jump7)
.Bind(&after_jump7)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &after_jump8)
+ .JumpIfTrue(ToBooleanMode::kConvertToBoolean, &after_jump8)
.Bind(&after_jump8)
- .JumpIfFalse(ToBooleanMode::kConvertToBoolean, &after_jump9)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &after_jump9)
.Bind(&after_jump9)
- .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_jump10)
+ .JumpIfFalse(ToBooleanMode::kConvertToBoolean, &after_jump10)
.Bind(&after_jump10)
+ .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_jump11)
+ .Bind(&after_jump11)
.JumpLoop(&loop_header, 0)
.Bind(&after_loop);
}
- BytecodeLabel end[10];
+ BytecodeLabel end[11];
{
// Longer jumps with constant operands
BytecodeLabel after_jump;
@@ -325,8 +330,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.JumpIfNotNull(&end[6])
.JumpIfUndefined(&end[7])
.JumpIfNotUndefined(&end[8])
+ .JumpIfUndefinedOrNull(&end[9])
.LoadLiteral(ast_factory.prototype_string())
- .JumpIfJSReceiver(&end[9]);
+ .JumpIfJSReceiver(&end[10]);
}
// Emit Smi table switch bytecode.
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 6e7b945231..339fc33178 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -146,6 +146,8 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()));
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
PositionTableEntry expected_positions[] = {
@@ -236,6 +238,8 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()));
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -288,6 +292,8 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()));
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -356,6 +362,8 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()));
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index cb9a83997e..a8ff998107 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -14,7 +14,9 @@
#include "test/unittests/compiler/node-test-utils.h"
using ::testing::_;
+using ::testing::Eq;
using v8::internal::compiler::Node;
+using v8::internal::compiler::TNode;
namespace c = v8::internal::compiler;
@@ -441,7 +443,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
{
- Node* index = m.IntPtrConstant(2);
+ TNode<IntPtrT> index = m.IntPtrConstant(2);
Node* load_constant = m.LoadConstantPoolEntry(index);
#ifdef V8_COMPRESS_POINTERS
Matcher<Node*> constant_pool_matcher =
@@ -511,16 +513,17 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* object = m.IntPtrConstant(0xDEADBEEF);
+ TNode<HeapObject> object =
+ m.ReinterpretCast<HeapObject>(m.IntPtrConstant(0xDEADBEEF));
int offset = 16;
- Node* load_field = m.LoadObjectField(object, offset);
+ TNode<Object> load_field = m.LoadObjectField(object, offset);
#ifdef V8_COMPRESS_POINTERS
EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
- MachineType::AnyCompressed(), object,
+ MachineType::AnyCompressed(), Eq(object),
c::IsIntPtrConstant(offset - kHeapObjectTag))));
#else
EXPECT_THAT(load_field, m.IsLoadFromObject(
- MachineType::AnyTagged(), object,
+ MachineType::AnyTagged(), Eq(object),
c::IsIntPtrConstant(offset - kHeapObjectTag)));
#endif
}
@@ -530,12 +533,14 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* arg1 = m.Int32Constant(2);
- Node* arg2 = m.Int32Constant(3);
- Node* context = m.Int32Constant(4);
- Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
- EXPECT_THAT(call_runtime, c::IsCall(_, _, arg1, arg2, _,
- c::IsInt32Constant(2), context, _, _));
+ TNode<Object> arg1 = m.ReinterpretCast<Object>(m.Int32Constant(2));
+ TNode<Object> arg2 = m.ReinterpretCast<Object>(m.Int32Constant(3));
+ TNode<Object> context = m.ReinterpretCast<Object>(m.Int32Constant(4));
+ TNode<Object> call_runtime =
+ m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
+ EXPECT_THAT(call_runtime,
+ c::IsCall(_, _, Eq(arg1), Eq(arg2), _, c::IsInt32Constant(2),
+ Eq(context), _, _));
}
}
@@ -549,29 +554,30 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Callable builtin =
CodeFactory::InterpreterCEntry(isolate(), result_size);
- Node* function_id = m.Int32Constant(0);
+ TNode<Int32T> function_id = m.Int32Constant(0);
InterpreterAssembler::RegListNodePair registers(m.IntPtrConstant(1),
m.Int32Constant(2));
- Node* context = m.IntPtrConstant(4);
+ TNode<Object> context = m.ReinterpretCast<Object>(m.Int32Constant(4));
Matcher<Node*> function_table = c::IsExternalConstant(
ExternalReference::runtime_function_table_address_for_unittests(
isolate()));
- Matcher<Node*> function = c::IsIntPtrAdd(
- function_table,
- c::IsChangeUint32ToWord(c::IsInt32Mul(
- function_id, c::IsInt32Constant(sizeof(Runtime::Function)))));
+ Matcher<Node*> function =
+ c::IsIntPtrAdd(function_table,
+ c::IsChangeUint32ToWord(c::IsInt32Mul(
+ Eq(function_id),
+ c::IsInt32Constant(sizeof(Runtime::Function)))));
Matcher<Node*> function_entry =
m.IsLoad(MachineType::Pointer(), function,
c::IsIntPtrConstant(offsetof(Runtime::Function, entry)));
Node* call_runtime =
m.CallRuntimeN(function_id, context, registers, result_size);
- EXPECT_THAT(
- call_runtime,
- c::IsCall(_, c::IsHeapConstant(builtin.code()),
- registers.reg_count(), registers.base_reg_location(),
- function_entry, context, _, _));
+ EXPECT_THAT(call_runtime,
+ c::IsCall(_, c::IsHeapConstant(builtin.code()),
+ Eq(registers.reg_count()),
+ Eq(registers.base_reg_location()), function_entry,
+ Eq(context), _, _));
}
}
}
@@ -581,12 +587,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* feedback_vector = m.LoadFeedbackVector();
+ TNode<HeapObject> feedback_vector = m.LoadFeedbackVector();
// Feedback vector is a phi node with two inputs. One of them is loading the
// feedback vector and the other is undefined constant (when feedback
// vectors aren't allocated). Find the input that loads feedback vector.
- CHECK(feedback_vector->opcode() == i::compiler::IrOpcode::kPhi);
+ CHECK_EQ(static_cast<Node*>(feedback_vector)->opcode(),
+ i::compiler::IrOpcode::kPhi);
Node* value0 =
i::compiler::NodeProperties::GetValueInput(feedback_vector, 0);
Node* value1 =
@@ -601,21 +608,22 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
c::IsIntPtrConstant(Register::function_closure().ToOperand() *
kSystemPointerSize)));
#ifdef V8_COMPRESS_POINTERS
- Matcher<Node*> load_vector_cell_matcher = IsChangeCompressedToTagged(
- m.IsLoadFromObject(MachineType::AnyCompressed(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
- kHeapObjectTag)));
+ Matcher<Node*> load_vector_cell_matcher =
+ IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
+ MachineType::CompressedPointer(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+ kHeapObjectTag)));
EXPECT_THAT(load_feedback_vector,
- IsChangeCompressedToTagged(m.IsLoadFromObject(
- MachineType::AnyCompressed(), load_vector_cell_matcher,
+ IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
+ MachineType::CompressedPointer(), load_vector_cell_matcher,
c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
#else
Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(), load_function_matcher,
+ MachineType::TaggedPointer(), load_function_matcher,
c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
EXPECT_THAT(load_feedback_vector,
m.IsLoadFromObject(
- MachineType::AnyTagged(), load_vector_cell_matcher,
+ MachineType::TaggedPointer(), load_vector_cell_matcher,
c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
#endif
}
diff --git a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
index 4001048a8e..1ae440d0c0 100644
--- a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
@@ -51,8 +51,8 @@ TEST(TaskQueueTest, TerminateMultipleReaders) {
TaskQueue queue;
TaskQueueThread thread1(&queue);
TaskQueueThread thread2(&queue);
- thread1.Start();
- thread2.Start();
+ CHECK(thread1.Start());
+ CHECK(thread2.Start());
queue.Terminate();
thread1.Join();
thread2.Join();
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index 8b425542c1..8c3fb017a4 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -71,7 +71,7 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
ast_node_factory.NewFunctionLiteral(
function_name, function_scope, statements, -1, -1, -1,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression,
+ FunctionSyntaxKind::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
shared->function_literal_id(), nullptr);
diff --git a/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
index b3843db46d..2a0e7d7f90 100644
--- a/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
@@ -160,8 +160,8 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStarted) {
ResultType result2{0};
ThreadedRunner runner1(NewTask(&result1, TestTask::kWaitTillCancelTriggered));
ThreadedRunner runner2(NewTask(&result2, TestTask::kWaitTillCancelTriggered));
- runner1.Start();
- runner2.Start();
+ CHECK(runner1.Start());
+ CHECK(runner2.Start());
// Busy wait on result to make sure both tasks are done.
while (result1.load() == 0 || result2.load() == 0) {
}
@@ -179,8 +179,8 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksNotRun) {
ThreadedRunner runner2(NewTask(&result2, TestTask::kCheckNotRun));
CancelAndWait();
// Tasks are canceled, hence the runner will bail out and not update result.
- runner1.Start();
- runner2.Start();
+ CHECK(runner1.Start());
+ CHECK(runner2.Start());
runner1.Join();
runner2.Join();
EXPECT_EQ(0u, result1);
@@ -193,7 +193,7 @@ TEST_F(CancelableTaskManagerTest, RemoveBeforeCancelAndWait) {
CancelableTaskManager::Id id = runner1.task_id();
EXPECT_EQ(1u, id);
EXPECT_EQ(TryAbortResult::kTaskAborted, manager()->TryAbort(id));
- runner1.Start();
+ CHECK(runner1.Start());
runner1.Join();
CancelAndWait();
EXPECT_EQ(0u, result1);
@@ -204,7 +204,7 @@ TEST_F(CancelableTaskManagerTest, RemoveAfterCancelAndWait) {
ThreadedRunner runner1(NewTask(&result1));
CancelableTaskManager::Id id = runner1.task_id();
EXPECT_EQ(1u, id);
- runner1.Start();
+ CHECK(runner1.Start());
runner1.Join();
CancelAndWait();
EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(id));
@@ -231,8 +231,8 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksNotRunTryAbortAll) {
ThreadedRunner runner2(NewTask(&result2, TestTask::kCheckNotRun));
EXPECT_EQ(TryAbortResult::kTaskAborted, TryAbortAll());
// Tasks are canceled, hence the runner will bail out and not update result.
- runner1.Start();
- runner2.Start();
+ CHECK(runner1.Start());
+ CHECK(runner2.Start());
runner1.Join();
runner2.Join();
EXPECT_EQ(0u, result1);
@@ -245,7 +245,7 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStartedTryAbortAll) {
ResultType result2{0};
ThreadedRunner runner1(NewTask(&result1, TestTask::kWaitTillCancelTriggered));
ThreadedRunner runner2(NewTask(&result2, TestTask::kWaitTillCancelTriggered));
- runner1.Start();
+ CHECK(runner1.Start());
// Busy wait on result to make sure task1 is done.
while (result1.load() == 0) {
}
@@ -255,7 +255,7 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStartedTryAbortAll) {
EXPECT_THAT(TryAbortAll(),
testing::AnyOf(testing::Eq(TryAbortResult::kTaskAborted),
testing::Eq(TryAbortResult::kTaskRunning)));
- runner2.Start();
+ CHECK(runner2.Start());
runner1.Join();
runner2.Join();
EXPECT_EQ(1u, result1);
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 1366f86ce7..22ee754321 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -20,6 +20,13 @@ constexpr const char* kTestTorquePrelude = R"(
type void;
type never;
+namespace torque_internal {
+ struct Reference<T: type> {
+ const object: HeapObject;
+ const offset: intptr;
+ }
+}
+
type Tagged generates 'TNode<Object>' constexpr 'ObjectPtr';
type Smi extends Tagged generates 'TNode<Smi>' constexpr 'Smi';
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index c36a0b70f8..def90fc3b5 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -39,4 +39,17 @@
'Torque*': [SKIP],
}], # 'system == windows and asan'
+['system == windows and arch == x64 and mode == release', {
+ # BUG(992783).
+ 'Torque.ConditionalFields': [SKIP],
+ 'Torque.UsingUnderscorePrefixedIdentifierError': [SKIP],
+}], # 'system == windows and arch == x64 and mode == release'
+
+##############################################################################
+['tsan == True', {
+ # https://crbug.com/v8/9380
+ # The test is broken and needs to be fixed to use separate isolates.
+ 'BackingStoreTest.RacyGrowWasmMemoryInPlace': [SKIP],
+}], # 'tsan == True'
+
]
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 725f7f4a59..791770ee94 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -1448,28 +1448,18 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset_void) {
WASM_ZERO, WASM_ZERO)});
}
-#define BYTE0(x) ((x)&0x7F)
-#define BYTE1(x) ((x >> 7) & 0x7F)
-#define BYTE2(x) ((x >> 14) & 0x7F)
-#define BYTE3(x) ((x >> 21) & 0x7F)
-
-#define VARINT1(x) BYTE0(x)
-#define VARINT2(x) BYTE0(x) | 0x80, BYTE1(x)
-#define VARINT3(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x)
-#define VARINT4(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x) | 0x80, BYTE3(x)
-
TEST_F(FunctionBodyDecoderTest, LoadMemOffset_varint) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
ExpectValidates(sigs.i_i(),
- {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT, VARINT1(0x45)});
+ {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT, U32V_1(0x45)});
ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT2(0x3999)});
+ U32V_2(0x3999)});
ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT3(0x344445)});
+ U32V_3(0x344445)});
ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT4(0x36666667)});
+ U32V_4(0x36666667)});
}
TEST_F(FunctionBodyDecoderTest, StoreMemOffset_varint) {
@@ -1477,25 +1467,15 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset_varint) {
module = builder.module();
builder.InitializeMemory();
ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT1(0x33)});
+ ZERO_ALIGNMENT, U32V_1(0x33)});
ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT2(0x1111)});
+ ZERO_ALIGNMENT, U32V_2(0x1111)});
ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT3(0x222222)});
+ ZERO_ALIGNMENT, U32V_3(0x222222)});
ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT4(0x44444444)});
+ ZERO_ALIGNMENT, U32V_4(0x44444444)});
}
-#undef BYTE0
-#undef BYTE1
-#undef BYTE2
-#undef BYTE3
-
-#undef VARINT1
-#undef VARINT2
-#undef VARINT3
-#undef VARINT4
-
TEST_F(FunctionBodyDecoderTest, AllLoadMemCombinations) {
TestModuleBuilder builder;
module = builder.module();
@@ -2104,6 +2084,9 @@ TEST_F(FunctionBodyDecoderTest, TableGet) {
ExpectValidates(
&sig,
{WASM_SET_LOCAL(local_func, WASM_TABLE_GET(tab_func2, WASM_I32V(7)))});
+ ExpectValidates(
+ &sig, {WASM_SET_LOCAL(local_ref, WASM_SEQ(WASM_I32V(6), kExprTableGet,
+ U32V_2(tab_ref1)))});
// We can store funcref values as anyref, but not the other way around.
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
@@ -3542,6 +3525,24 @@ TEST_F(WasmOpcodeLengthTest, VariableLength) {
ExpectLength(4, kExprRefFunc, U32V_3(44));
ExpectLength(5, kExprRefFunc, U32V_4(66));
ExpectLength(6, kExprRefFunc, U32V_5(77));
+
+ ExpectLength(2, kExprTableGet, U32V_1(1));
+ ExpectLength(3, kExprTableGet, U32V_2(33));
+ ExpectLength(4, kExprTableGet, U32V_3(44));
+ ExpectLength(5, kExprTableGet, U32V_4(66));
+ ExpectLength(6, kExprTableGet, U32V_5(77));
+
+ ExpectLength(2, kExprTableSet, U32V_1(1));
+ ExpectLength(3, kExprTableSet, U32V_2(33));
+ ExpectLength(4, kExprTableSet, U32V_3(44));
+ ExpectLength(5, kExprTableSet, U32V_4(66));
+ ExpectLength(6, kExprTableSet, U32V_5(77));
+
+ ExpectLength(3, kExprCallIndirect, U32V_1(1), U32V_1(1));
+ ExpectLength(4, kExprCallIndirect, U32V_1(1), U32V_2(33));
+ ExpectLength(5, kExprCallIndirect, U32V_1(1), U32V_3(44));
+ ExpectLength(6, kExprCallIndirect, U32V_1(1), U32V_4(66));
+ ExpectLength(7, kExprCallIndirect, U32V_1(1), U32V_5(77));
}
TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 1659370999..9f7cfc6b1d 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -465,7 +465,7 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
CHECK(!GetThreadInWasmFlag());
// Set the thread-in-wasm flag manually in this thread.
*trap_handler::GetThreadInWasmThreadLocalAddress() = 1;
- runner.Start();
+ CHECK(runner.Start());
runner.Join();
CHECK(GetThreadInWasmFlag());
// Reset the thread-in-wasm flag.
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index eea1f8208d..a6b29ffc6c 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -156,8 +156,6 @@ class WasmCodeManagerTest : public TestWithContext,
public ::testing::WithParamInterface<ModuleStyle> {
public:
static constexpr uint32_t kNumFunctions = 10;
- static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
- JumpTableAssembler::SizeForNumberOfSlots(kNumFunctions));
static size_t allocate_page_size;
static size_t commit_page_size;
@@ -169,6 +167,7 @@ class WasmCodeManagerTest : public TestWithContext,
}
CHECK_NE(0, allocate_page_size);
CHECK_NE(0, commit_page_size);
+ manager()->DisableImplicitAllocationsForTesting();
}
using NativeModulePtr = std::shared_ptr<NativeModule>;
@@ -199,12 +198,6 @@ class WasmCodeManagerTest : public TestWithContext,
void SetMaxCommittedMemory(size_t limit) {
manager()->SetMaxCommittedMemoryForTesting(limit);
}
-
- void DisableWin64UnwindInfoForTesting() {
-#if defined(V8_OS_WIN_X64)
- manager()->DisableWin64UnwindInfoForTesting();
-#endif
- }
};
// static
@@ -219,18 +212,18 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
SetMaxCommittedMemory(0);
CHECK_EQ(0, manager()->committed_code_space());
- ASSERT_DEATH_IF_SUPPORTED(AllocModule(allocate_page_size, GetParam()),
+ NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
+ ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, kCodeAlignment),
"OOM in wasm code commit");
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
SetMaxCommittedMemory(allocate_page_size);
- DisableWin64UnwindInfoForTesting();
CHECK_EQ(0, manager()->committed_code_space());
NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
CHECK(native_module);
- CHECK_EQ(commit_page_size, manager()->committed_code_space());
+ CHECK_EQ(0, manager()->committed_code_space());
WasmCodeRefScope code_ref_scope;
uint32_t index = 0;
WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
@@ -242,7 +235,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
CHECK_EQ(commit_page_size, manager()->committed_code_space());
code = AddCode(native_module.get(), index++,
- allocate_page_size - 4 * kCodeAlignment - kJumpTableSize);
+ allocate_page_size - 4 * kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(allocate_page_size, manager()->committed_code_space());
@@ -256,29 +249,25 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
SetMaxCommittedMemory(3 * allocate_page_size);
- DisableWin64UnwindInfoForTesting();
NativeModulePtr nm1 = AllocModule(2 * allocate_page_size, GetParam());
NativeModulePtr nm2 = AllocModule(2 * allocate_page_size, GetParam());
CHECK(nm1);
CHECK(nm2);
WasmCodeRefScope code_ref_scope;
- WasmCode* code =
- AddCode(nm1.get(), 0, 2 * allocate_page_size - kJumpTableSize);
+ WasmCode* code = AddCode(nm1.get(), 0, 2 * allocate_page_size);
CHECK_NOT_NULL(code);
- ASSERT_DEATH_IF_SUPPORTED(
- AddCode(nm2.get(), 0, 2 * allocate_page_size - kJumpTableSize),
- "OOM in wasm code commit");
+ ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * allocate_page_size),
+ "OOM in wasm code commit");
}
TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
SetMaxCommittedMemory(3 * allocate_page_size);
- DisableWin64UnwindInfoForTesting();
NativeModulePtr nm = AllocModule(allocate_page_size, GetParam());
size_t module_size =
GetParam() == Fixed ? kMaxWasmCodeMemory : allocate_page_size;
- size_t remaining_space_in_module = module_size - kJumpTableSize;
+ size_t remaining_space_in_module = module_size;
if (GetParam() == Fixed) {
// Requesting more than the remaining space fails because the module cannot
// grow.
@@ -297,7 +286,6 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
TEST_P(WasmCodeManagerTest, CommitIncrements) {
SetMaxCommittedMemory(10 * allocate_page_size);
- DisableWin64UnwindInfoForTesting();
NativeModulePtr nm = AllocModule(3 * allocate_page_size, GetParam());
WasmCodeRefScope code_ref_scope;
@@ -308,15 +296,13 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
CHECK_NOT_NULL(code);
CHECK_EQ(commit_page_size + 2 * allocate_page_size,
manager()->committed_code_space());
- code = AddCode(nm.get(), 2,
- allocate_page_size - kCodeAlignment - kJumpTableSize);
+ code = AddCode(nm.get(), 2, allocate_page_size - kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(3 * allocate_page_size, manager()->committed_code_space());
}
TEST_P(WasmCodeManagerTest, Lookup) {
SetMaxCommittedMemory(2 * allocate_page_size);
- DisableWin64UnwindInfoForTesting();
NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
NativeModulePtr nm2 = AllocModule(allocate_page_size, GetParam());
@@ -362,7 +348,6 @@ TEST_P(WasmCodeManagerTest, Lookup) {
TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
SetMaxCommittedMemory(2 * allocate_page_size);
- DisableWin64UnwindInfoForTesting();
NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
diff --git a/deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc b/deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc
new file mode 100644
index 0000000000..04c611e1de
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc
@@ -0,0 +1,224 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-module-sourcemap.h"
+#include <memory>
+
+#include "src/api/api.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleSourceMapTest : public TestWithIsolateAndZone {};
+
+TEST_F(WasmModuleSourceMapTest, InvalidSourceMap) {
+ auto i_isolate = isolate();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+
+ // Incomplete source map without "sources" entry.
+ char incomplete_src_map[] =
+ "{\"version\":3,\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto incomplete_src_map_str =
+ v8::String::NewFromUtf8(v8_isolate, incomplete_src_map).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> incomplete_src_map_ptr(
+ new WasmModuleSourceMap(v8_isolate, incomplete_src_map_str));
+ EXPECT_FALSE(incomplete_src_map_ptr->IsValid());
+
+ // Miswrite key "mappings" as "mapping".
+ char wrong_key[] =
+ "{\"version\":3,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mapping\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto wrong_key_str =
+ v8::String::NewFromUtf8(v8_isolate, wrong_key).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> wrong_key_ptr(
+ new WasmModuleSourceMap(v8_isolate, wrong_key_str));
+ EXPECT_FALSE(wrong_key_ptr->IsValid());
+
+ // Wrong version number.
+ char wrong_ver[] =
+ "{\"version\":2,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto wrong_ver_str =
+ v8::String::NewFromUtf8(v8_isolate, wrong_ver).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> wrong_ver_ptr(
+ new WasmModuleSourceMap(v8_isolate, wrong_ver_str));
+ EXPECT_FALSE(wrong_ver_ptr->IsValid());
+
+ // Wrong type of "version" entry.
+ char ver_as_arr[] =
+ "{\"version\":[3],\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto ver_as_arr_str =
+ v8::String::NewFromUtf8(v8_isolate, ver_as_arr).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> ver_as_arr_ptr(
+ new WasmModuleSourceMap(v8_isolate, ver_as_arr_str));
+ EXPECT_FALSE(ver_as_arr_ptr->IsValid());
+
+ // Wrong type of "sources" entry.
+ char sources_as_str[] =
+ "{\"version\":3,\"sources\":\"./"
+ "test.h,main.cpp\",\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto sources_as_str_str =
+ v8::String::NewFromUtf8(v8_isolate, sources_as_str).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> sources_as_str_ptr(
+ new WasmModuleSourceMap(v8_isolate, sources_as_str_str));
+ EXPECT_FALSE(sources_as_str_ptr->IsValid());
+
+ // Invalid "mappings" entry.
+ char wrong_mappings[] =
+ "{\"version\":3,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "&BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto wrong_mappings_str =
+ v8::String::NewFromUtf8(v8_isolate, wrong_mappings).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> wrong_mappings_ptr(
+ new WasmModuleSourceMap(v8_isolate, wrong_mappings_str));
+ EXPECT_FALSE(wrong_mappings_ptr->IsValid());
+}
+
+TEST_F(WasmModuleSourceMapTest, HasSource) {
+ char src_map[] =
+ "{\"version\":3,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto i_isolate = isolate();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+ auto src_map_str =
+ v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+ new WasmModuleSourceMap(v8_isolate, src_map_str));
+ EXPECT_TRUE(src_map_ptr->IsValid());
+
+ EXPECT_FALSE(src_map_ptr->HasSource(0x387, 0x3AF));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x3B0, 0x3B5));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x3B6, 0x3BC));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x3BD, 0x3C7));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x3C8, 0x3DA));
+ EXPECT_TRUE(src_map_ptr->HasSource(0x3DB, 0x414));
+ EXPECT_TRUE(src_map_ptr->HasSource(0x415, 0x44E));
+ EXPECT_TRUE(src_map_ptr->HasSource(0x450, 0x4DC));
+ EXPECT_TRUE(src_map_ptr->HasSource(0x4DE, 0x5F1));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5F3, 0x437A));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x437C, 0x5507));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5508, 0x5557));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5559, 0x5609));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x560A, 0x563D));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x563E, 0x564A));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x564B, 0x5656));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5658, 0x5713));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5715, 0x59B0));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x59B1, 0x59BC));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x59BD, 0x59C6));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x59C7, 0x59D8));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x59D9, 0x59E7));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x59E9, 0x5B50));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5B52, 0x5C53));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5C54, 0x5C57));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5C59, 0x5EBD));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x5EBF, 0x6030));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x6031, 0x608D));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x608E, 0x609E));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x609F, 0x60B3));
+ EXPECT_FALSE(src_map_ptr->HasSource(0x60B4, 0x60BD));
+}
+
+TEST_F(WasmModuleSourceMapTest, HasValidEntry) {
+ char src_map[] =
+ "{\"version\":3,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto i_isolate = isolate();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+ auto src_map_str =
+ v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+ new WasmModuleSourceMap(v8_isolate, src_map_str));
+ EXPECT_TRUE(src_map_ptr->IsValid());
+
+ EXPECT_FALSE(src_map_ptr->HasValidEntry(0x450, 0x467));
+ EXPECT_FALSE(src_map_ptr->HasValidEntry(0x450, 0x450));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x450, 0x47A));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x450, 0x4A9));
+ EXPECT_FALSE(src_map_ptr->HasValidEntry(0x4DE, 0x4F5));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x541));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x57D));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x5B7));
+ EXPECT_FALSE(src_map_ptr->HasValidEntry(0x4DE, 0x4DE));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x500));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x521));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x560));
+ EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x597));
+}
+
+TEST_F(WasmModuleSourceMapTest, GetFilename) {
+ char src_map[] =
+ "{\"version\":3,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto i_isolate = isolate();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+ auto src_map_str =
+ v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+ new WasmModuleSourceMap(v8_isolate, src_map_str));
+ EXPECT_TRUE(src_map_ptr->IsValid());
+
+ EXPECT_STREQ("./test.h", src_map_ptr->GetFilename(0x47A).c_str());
+ EXPECT_STREQ("./test.h", src_map_ptr->GetFilename(0x4A9).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x500).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x521).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x541).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x560).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x57D).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x597).c_str());
+ EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x5B7).c_str());
+}
+
+TEST_F(WasmModuleSourceMapTest, SourceLine) {
+ char src_map[] =
+ "{\"version\":3,\"sources\":[\"./"
+ "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+ "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+ "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+ auto i_isolate = isolate();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+ auto src_map_str =
+ v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+ std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+ new WasmModuleSourceMap(v8_isolate, src_map_str));
+ EXPECT_TRUE(src_map_ptr->IsValid());
+
+ EXPECT_EQ(13ul, src_map_ptr->GetSourceLine(0x47A));
+ EXPECT_EQ(14ul, src_map_ptr->GetSourceLine(0x4A9));
+ EXPECT_EQ(5ul, src_map_ptr->GetSourceLine(0x500));
+ EXPECT_EQ(7ul, src_map_ptr->GetSourceLine(0x521));
+ EXPECT_EQ(8ul, src_map_ptr->GetSourceLine(0x541));
+ EXPECT_EQ(11ul, src_map_ptr->GetSourceLine(0x560));
+ EXPECT_EQ(12ul, src_map_ptr->GetSourceLine(0x57D));
+ EXPECT_EQ(15ul, src_map_ptr->GetSourceLine(0x597));
+ EXPECT_EQ(16ul, src_map_ptr->GetSourceLine(0x5B7));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8