author     Michaël Zasso <targos@protonmail.com>  2016-09-06 22:49:51 +0200
committer  Michaël Zasso <targos@protonmail.com>  2016-09-22 09:51:19 +0200
commit     ec02b811a8a5c999bab4de312be2d732b7d9d50b (patch)
tree       ca3068017254f238cf413a451c57a803572983a4 /deps/v8/test/unittests
parent     d2eb7ce0105369a9cad82787cb33a665e9bd00ad (diff)
deps: update V8 to 5.4.500.27
Pick up latest commit from the 5.4-lkgr branch.

deps: edit V8 gitignore to allow trace event copy
deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8
deps: edit V8 gitignore to allow gtest_prod.h copy
deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87

PR-URL: https://github.com/nodejs/node/pull/8317
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/test/unittests')
-rw-r--r--  deps/v8/test/unittests/BUILD.gn | 77
-rw-r--r--  deps/v8/test/unittests/base/atomic-utils-unittest.cc (renamed from deps/v8/test/unittests/atomic-utils-unittest.cc) | 29
-rw-r--r--  deps/v8/test/unittests/base/ieee754-unittest.cc | 405
-rw-r--r--  deps/v8/test/unittests/base/platform/time-unittest.cc | 75
-rw-r--r--  deps/v8/test/unittests/base/sys-info-unittest.cc | 9
-rw-r--r--  deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc | 156
-rw-r--r--  deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc | 381
-rw-r--r--  deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc | 1087
-rw-r--r--  deps/v8/test/unittests/compiler/change-lowering-unittest.cc | 628
-rw-r--r--  deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc | 59
-rw-r--r--  deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc | 268
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc | 178
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-unittest.cc | 58
-rw-r--r--  deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc | 28
-rw-r--r--  deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc | 14
-rw-r--r--  deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc | 399
-rw-r--r--  deps/v8/test/unittests/compiler/escape-analysis-unittest.cc | 23
-rw-r--r--  deps/v8/test/unittests/compiler/graph-reducer-unittest.cc | 6
-rw-r--r--  deps/v8/test/unittests/compiler/graph-unittest.h | 3
-rw-r--r--  deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc | 65
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-selector-unittest.cc | 26
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-selector-unittest.h | 7
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc | 22
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-sequence-unittest.h | 6
-rw-r--r--  deps/v8/test/unittests/compiler/int64-lowering-unittest.cc | 243
-rw-r--r--  deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc | 1269
-rw-r--r--  deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc | 86
-rw-r--r--  deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc | 144
-rw-r--r--  deps/v8/test/unittests/compiler/js-operator-unittest.cc | 16
-rw-r--r--  deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc | 526
-rw-r--r--  deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc | 89
-rw-r--r--  deps/v8/test/unittests/compiler/load-elimination-unittest.cc | 227
-rw-r--r--  deps/v8/test/unittests/compiler/loop-peeling-unittest.cc | 188
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc | 539
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-unittest.cc | 10
-rw-r--r--  deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc | 37
-rw-r--r--  deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc | 135
-rw-r--r--  deps/v8/test/unittests/compiler/move-optimizer-unittest.cc | 6
-rw-r--r--  deps/v8/test/unittests/compiler/node-matchers-unittest.cc | 53
-rw-r--r--  deps/v8/test/unittests/compiler/node-properties-unittest.cc | 6
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.cc | 458
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.h | 87
-rw-r--r--  deps/v8/test/unittests/compiler/node-unittest.cc | 6
-rw-r--r--  deps/v8/test/unittests/compiler/register-allocator-unittest.cc | 3
-rw-r--r--  deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc | 30
-rw-r--r--  deps/v8/test/unittests/compiler/scheduler-unittest.cc | 6
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc | 272
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-unittest.cc | 75
-rw-r--r--  deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc | 57
-rw-r--r--  deps/v8/test/unittests/compiler/typer-unittest.cc | 40
-rw-r--r--  deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc | 3
-rw-r--r--  deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc | 150
-rw-r--r--  deps/v8/test/unittests/eh-frame-iterator-unittest.cc | 61
-rw-r--r--  deps/v8/test/unittests/eh-frame-writer-unittest.cc | 464
-rw-r--r--  deps/v8/test/unittests/heap/gc-tracer-unittest.cc | 190
-rw-r--r--  deps/v8/test/unittests/heap/marking-unittest.cc | 160
-rw-r--r--  deps/v8/test/unittests/heap/slot-set-unittest.cc | 14
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc | 511
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc | 119
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc | 254
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc | 149
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc | 87
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc | 531
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc | 185
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc | 219
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecode-utils.h | 37
-rw-r--r--  deps/v8/test/unittests/interpreter/bytecodes-unittest.cc | 161
-rw-r--r--  deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc | 229
-rw-r--r--  deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc | 376
-rw-r--r--  deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h | 2
-rw-r--r--  deps/v8/test/unittests/libplatform/worker-thread-unittest.cc | 16
-rw-r--r--  deps/v8/test/unittests/register-configuration-unittest.cc | 166
-rw-r--r--  deps/v8/test/unittests/source-position-table-unittest.cc (renamed from deps/v8/test/unittests/interpreter/source-position-table-unittest.cc) | 50
-rw-r--r--  deps/v8/test/unittests/test-utils.cc | 18
-rw-r--r--  deps/v8/test/unittests/test-utils.h | 6
-rw-r--r--  deps/v8/test/unittests/unittests.gyp | 296
-rw-r--r--  deps/v8/test/unittests/unittests.status | 8
-rw-r--r--  deps/v8/test/unittests/value-serializer-unittest.cc | 1368
-rw-r--r--  deps/v8/test/unittests/wasm/OWNERS | 6
-rw-r--r--  deps/v8/test/unittests/wasm/asm-types-unittest.cc | 723
-rw-r--r--  deps/v8/test/unittests/wasm/ast-decoder-unittest.cc | 1400
-rw-r--r--  deps/v8/test/unittests/wasm/control-transfer-unittest.cc | 402
-rw-r--r--  deps/v8/test/unittests/wasm/decoder-unittest.cc | 39
-rw-r--r--  deps/v8/test/unittests/wasm/encoder-unittest.cc | 196
-rw-r--r--  deps/v8/test/unittests/wasm/leb-helper-unittest.cc | 191
-rw-r--r--  deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc | 53
-rw-r--r--  deps/v8/test/unittests/wasm/module-decoder-unittest.cc | 992
-rw-r--r--  deps/v8/test/unittests/wasm/switch-logic-unittest.cc | 89
-rw-r--r--  deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc | 94
89 files changed, 13725 insertions, 4907 deletions
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
new file mode 100644
index 0000000000..7193afb966
--- /dev/null
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -0,0 +1,77 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# The sources are kept automatically in sync with unittests.gyp.
+
+import("../../gni/v8.gni")
+
+gypi_values = exec_script("//build/gypi_to_gn.py",
+ [ rebase_path("unittests.gyp") ],
+ "scope",
+ [ "unittests.gyp" ])
+
+v8_executable("unittests") {
+ testonly = true
+
+ sources = gypi_values.unittests_sources
+
+ if (v8_current_cpu == "arm") {
+ sources += gypi_values.unittests_sources_arm
+ } else if (v8_current_cpu == "arm64") {
+ sources += gypi_values.unittests_sources_arm64
+ } else if (v8_current_cpu == "x86") {
+ sources += gypi_values.unittests_sources_ia32
+ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
+ sources += gypi_values.unittests_sources_mips
+ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+ sources += gypi_values.unittests_sources_mips64
+ } else if (v8_current_cpu == "x64") {
+ sources += gypi_values.unittests_sources_x64
+ } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+ sources += gypi_values.unittests_sources_ppc
+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ sources += gypi_values.unittests_sources_s390
+ }
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ # TODO(machenbach): Translate from gyp.
+ #['OS=="aix"', {
+ # 'ldflags': [ '-Wl,-bbigtoc' ],
+ #}],
+
+ deps = [
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ if (is_component_build) {
+ # compiler-unittests can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ deps += [ "../..:v8_maybe_snapshot" ]
+ } else {
+ deps += [ "../..:v8" ]
+ }
+
+ if (is_win) {
+ # This warning is benignly triggered by the U16 and U32 macros in
+ # bytecode-utils.h.
+ # C4309: 'static_cast': truncation of constant value
+ cflags = [ "/wd4309" ]
+
+ # Suppress warnings about importing locally defined symbols.
+ if (is_component_build) {
+ ldflags = [
+ "/ignore:4049",
+ "/ignore:4217",
+ ]
+ }
+ }
+}
diff --git a/deps/v8/test/unittests/atomic-utils-unittest.cc b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
index ad33853d58..8e90c423e2 100644
--- a/deps/v8/test/unittests/atomic-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
@@ -4,11 +4,11 @@
#include <limits.h>
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
-namespace internal {
+namespace base {
TEST(AtomicNumber, Constructor) {
// Test some common types.
@@ -58,6 +58,29 @@ TEST(AtomicNumber, Increment) {
EXPECT_EQ(std::numeric_limits<size_t>::max(), c.Value());
}
+TEST(AtomicNumber, Decrement) {
+ AtomicNumber<size_t> a(std::numeric_limits<size_t>::max());
+ a.Increment(1);
+ EXPECT_EQ(0, a.Value());
+ a.Decrement(1);
+ EXPECT_EQ(std::numeric_limits<size_t>::max(), a.Value());
+}
+
+TEST(AtomicNumber, OperatorAdditionAssignment) {
+ AtomicNumber<size_t> a(0u);
+ AtomicNumber<size_t> b(std::numeric_limits<size_t>::max());
+ a += b.Value();
+ EXPECT_EQ(a.Value(), b.Value());
+ EXPECT_EQ(b.Value(), std::numeric_limits<size_t>::max());
+}
+
+TEST(AtomicNumber, OperatorSubtractionAssignment) {
+ AtomicNumber<size_t> a(std::numeric_limits<size_t>::max());
+ AtomicNumber<size_t> b(std::numeric_limits<size_t>::max());
+ a -= b.Value();
+ EXPECT_EQ(a.Value(), 0u);
+ EXPECT_EQ(b.Value(), std::numeric_limits<size_t>::max());
+}
namespace {
@@ -213,5 +236,5 @@ TEST(AtomicEnumSet, Equality) {
EXPECT_FALSE(a != b);
}
-} // namespace internal
+} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/ieee754-unittest.cc b/deps/v8/test/unittests/base/ieee754-unittest.cc
new file mode 100644
index 0000000000..2110b63976
--- /dev/null
+++ b/deps/v8/test/unittests/base/ieee754-unittest.cc
@@ -0,0 +1,405 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/ieee754.h"
+#include "src/base/macros.h"
+#include "testing/gmock-support.h"
+#include "testing/gtest-support.h"
+
+using testing::BitEq;
+using testing::IsNaN;
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+namespace {
+
+double const kE = 2.718281828459045;
+double const kPI = 3.141592653589793;
+double const kTwo120 = 1.329227995784916e+36;
+double const kInfinity = std::numeric_limits<double>::infinity();
+double const kQNaN = std::numeric_limits<double>::quiet_NaN();
+double const kSNaN = std::numeric_limits<double>::signaling_NaN();
+
+} // namespace
+
+TEST(Ieee754, Acos) {
+ EXPECT_THAT(acos(kInfinity), IsNaN());
+ EXPECT_THAT(acos(-kInfinity), IsNaN());
+ EXPECT_THAT(acos(kQNaN), IsNaN());
+ EXPECT_THAT(acos(kSNaN), IsNaN());
+
+ EXPECT_EQ(0.0, acos(1.0));
+}
+
+TEST(Ieee754, Acosh) {
+ // Tests for acosh for exceptional values
+ EXPECT_EQ(kInfinity, acosh(kInfinity));
+ EXPECT_THAT(acosh(-kInfinity), IsNaN());
+ EXPECT_THAT(acosh(kQNaN), IsNaN());
+ EXPECT_THAT(acosh(kSNaN), IsNaN());
+ EXPECT_THAT(acosh(0.9), IsNaN());
+
+ // Test basic acosh functionality
+ EXPECT_EQ(0.0, acosh(1.0));
+ // acosh(1.5) = log((sqrt(5)+3)/2), case 1 < x < 2
+ EXPECT_EQ(0.9624236501192069e0, acosh(1.5));
+ // acosh(4) = log(sqrt(15)+4), case 2 < x < 2^28
+ EXPECT_EQ(2.0634370688955608e0, acosh(4.0));
+ // acosh(2^50), case 2^28 < x
+ EXPECT_EQ(35.35050620855721e0, acosh(1125899906842624.0));
+ // acosh(most-positive-float), no overflow
+ EXPECT_EQ(710.4758600739439e0, acosh(1.7976931348623157e308));
+}
+
+TEST(Ieee754, Asin) {
+ EXPECT_THAT(asin(kInfinity), IsNaN());
+ EXPECT_THAT(asin(-kInfinity), IsNaN());
+ EXPECT_THAT(asin(kQNaN), IsNaN());
+ EXPECT_THAT(asin(kSNaN), IsNaN());
+
+ EXPECT_THAT(asin(0.0), BitEq(0.0));
+ EXPECT_THAT(asin(-0.0), BitEq(-0.0));
+}
+
+TEST(Ieee754, Asinh) {
+ // Tests for asinh for exceptional values
+ EXPECT_EQ(kInfinity, asinh(kInfinity));
+ EXPECT_EQ(-kInfinity, asinh(-kInfinity));
+ EXPECT_THAT(asin(kQNaN), IsNaN());
+ EXPECT_THAT(asin(kSNaN), IsNaN());
+
+ // Test basic asinh functionality
+ EXPECT_THAT(asinh(0.0), BitEq(0.0));
+ EXPECT_THAT(asinh(-0.0), BitEq(-0.0));
+ // asinh(2^-29) = 2^-29, case |x| < 2^-28, where acosh(x) = x
+ EXPECT_EQ(1.862645149230957e-9, asinh(1.862645149230957e-9));
+ // asinh(-2^-29) = -2^-29, case |x| < 2^-28, where acosh(x) = x
+ EXPECT_EQ(-1.862645149230957e-9, asinh(-1.862645149230957e-9));
+ // asinh(2^-28), case 2 > |x| >= 2^-28
+ EXPECT_EQ(3.725290298461914e-9, asinh(3.725290298461914e-9));
+ // asinh(-2^-28), case 2 > |x| >= 2^-28
+ EXPECT_EQ(-3.725290298461914e-9, asinh(-3.725290298461914e-9));
+ // asinh(1), case 2 > |x| > 2^-28
+ EXPECT_EQ(0.881373587019543e0, asinh(1.0));
+ // asinh(-1), case 2 > |x| > 2^-28
+ EXPECT_EQ(-0.881373587019543e0, asinh(-1.0));
+ // asinh(5), case 2^28 > |x| > 2
+ EXPECT_EQ(2.3124383412727525e0, asinh(5.0));
+ // asinh(-5), case 2^28 > |x| > 2
+ EXPECT_EQ(-2.3124383412727525e0, asinh(-5.0));
+ // asinh(2^28), case 2^28 > |x|
+ EXPECT_EQ(20.101268236238415e0, asinh(268435456.0));
+ // asinh(-2^28), case 2^28 > |x|
+ EXPECT_EQ(-20.101268236238415e0, asinh(-268435456.0));
+ // asinh(<most-positive-float>), no overflow
+ EXPECT_EQ(710.4758600739439e0, asinh(1.7976931348623157e308));
+ // asinh(-<most-positive-float>), no overflow
+ EXPECT_EQ(-710.4758600739439e0, asinh(-1.7976931348623157e308));
+}
+
+TEST(Ieee754, Atan) {
+ EXPECT_THAT(atan(kQNaN), IsNaN());
+ EXPECT_THAT(atan(kSNaN), IsNaN());
+ EXPECT_THAT(atan(-0.0), BitEq(-0.0));
+ EXPECT_THAT(atan(0.0), BitEq(0.0));
+ EXPECT_DOUBLE_EQ(1.5707963267948966, atan(kInfinity));
+ EXPECT_DOUBLE_EQ(-1.5707963267948966, atan(-kInfinity));
+}
+
+TEST(Ieee754, Atan2) {
+ EXPECT_THAT(atan2(kQNaN, kQNaN), IsNaN());
+ EXPECT_THAT(atan2(kQNaN, kSNaN), IsNaN());
+ EXPECT_THAT(atan2(kSNaN, kQNaN), IsNaN());
+ EXPECT_THAT(atan2(kSNaN, kSNaN), IsNaN());
+ EXPECT_DOUBLE_EQ(0.7853981633974483, atan2(kInfinity, kInfinity));
+ EXPECT_DOUBLE_EQ(2.356194490192345, atan2(kInfinity, -kInfinity));
+ EXPECT_DOUBLE_EQ(-0.7853981633974483, atan2(-kInfinity, kInfinity));
+ EXPECT_DOUBLE_EQ(-2.356194490192345, atan2(-kInfinity, -kInfinity));
+}
+
+TEST(Ieee754, Atanh) {
+ EXPECT_THAT(atanh(kQNaN), IsNaN());
+ EXPECT_THAT(atanh(kSNaN), IsNaN());
+ EXPECT_THAT(atanh(kInfinity), IsNaN());
+ EXPECT_EQ(kInfinity, atanh(1));
+ EXPECT_EQ(-kInfinity, atanh(-1));
+ EXPECT_DOUBLE_EQ(0.54930614433405478, atanh(0.5));
+}
+
+TEST(Ieee754, Cos) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(cos(kQNaN), IsNaN());
+ EXPECT_THAT(cos(kSNaN), IsNaN());
+ EXPECT_THAT(cos(kInfinity), IsNaN());
+ EXPECT_THAT(cos(-kInfinity), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / cos(-0.0));
+ EXPECT_EQ(1.0, 1 / cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, cos(0.39269908169872414));
+ // Test KERNELTAN for |x| < 0.67434.
+ EXPECT_EQ(0.9238795325112867, cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, cos(0.25));
+ EXPECT_EQ(0.8775825618903728, cos(0.5));
+ EXPECT_EQ(0.7073882691671998, cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, cos(1048575.0 / 2 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, cos(-kTwo120));
+}
+
+TEST(Ieee754, Cosh) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(cosh(kQNaN), IsNaN());
+ EXPECT_THAT(cosh(kSNaN), IsNaN());
+ EXPECT_THAT(cosh(kInfinity), kInfinity);
+ EXPECT_THAT(cosh(-kInfinity), kInfinity);
+ EXPECT_EQ(1, cosh(0.0));
+ EXPECT_EQ(1, cosh(-0.0));
+}
+
+TEST(Ieee754, Exp) {
+ EXPECT_THAT(exp(kQNaN), IsNaN());
+ EXPECT_THAT(exp(kSNaN), IsNaN());
+ EXPECT_EQ(0.0, exp(-kInfinity));
+ EXPECT_EQ(0.0, exp(-1000));
+ EXPECT_EQ(0.0, exp(-745.1332191019412));
+ EXPECT_EQ(2.2250738585072626e-308, exp(-708.39641853226408));
+ EXPECT_EQ(3.307553003638408e-308, exp(-708.0));
+ EXPECT_EQ(4.9406564584124654e-324, exp(-7.45133219101941108420e+02));
+ EXPECT_EQ(0.36787944117144233, exp(-1.0));
+ EXPECT_EQ(1.0, exp(-0.0));
+ EXPECT_EQ(1.0, exp(0.0));
+ EXPECT_EQ(1.0, exp(2.2250738585072014e-308));
+
+ // Test that exp(x) is monotonic near 1.
+ EXPECT_GE(exp(1.0), exp(0.9999999999999999));
+ EXPECT_LE(exp(1.0), exp(1.0000000000000002));
+
+ // Test that we produce the correctly rounded result for 1.
+ EXPECT_EQ(kE, exp(1.0));
+
+ EXPECT_EQ(7.38905609893065e0, exp(2.0));
+ EXPECT_EQ(1.7976931348622732e308, exp(7.09782712893383973096e+02));
+ EXPECT_EQ(2.6881171418161356e+43, exp(100.0));
+ EXPECT_EQ(8.218407461554972e+307, exp(709.0));
+ EXPECT_EQ(1.7968190737295725e308, exp(709.7822265625e0));
+ EXPECT_EQ(kInfinity, exp(709.7827128933841e0));
+ EXPECT_EQ(kInfinity, exp(710.0));
+ EXPECT_EQ(kInfinity, exp(1000.0));
+ EXPECT_EQ(kInfinity, exp(kInfinity));
+}
+
+TEST(Ieee754, Expm1) {
+ EXPECT_THAT(expm1(kQNaN), IsNaN());
+ EXPECT_THAT(expm1(kSNaN), IsNaN());
+ EXPECT_EQ(-1.0, expm1(-kInfinity));
+ EXPECT_EQ(kInfinity, expm1(kInfinity));
+ EXPECT_EQ(0.0, expm1(-0.0));
+ EXPECT_EQ(0.0, expm1(0.0));
+ EXPECT_EQ(1.718281828459045, expm1(1.0));
+ EXPECT_EQ(2.6881171418161356e+43, expm1(100.0));
+ EXPECT_EQ(8.218407461554972e+307, expm1(709.0));
+ EXPECT_EQ(kInfinity, expm1(710.0));
+}
+
+TEST(Ieee754, Log) {
+ EXPECT_THAT(log(kQNaN), IsNaN());
+ EXPECT_THAT(log(kSNaN), IsNaN());
+ EXPECT_THAT(log(-kInfinity), IsNaN());
+ EXPECT_THAT(log(-1.0), IsNaN());
+ EXPECT_EQ(-kInfinity, log(-0.0));
+ EXPECT_EQ(-kInfinity, log(0.0));
+ EXPECT_EQ(0.0, log(1.0));
+ EXPECT_EQ(kInfinity, log(kInfinity));
+
+ // Test that log(E) produces the correctly rounded result.
+ EXPECT_EQ(1.0, log(kE));
+}
+
+TEST(Ieee754, Log1p) {
+ EXPECT_THAT(log1p(kQNaN), IsNaN());
+ EXPECT_THAT(log1p(kSNaN), IsNaN());
+ EXPECT_THAT(log1p(-kInfinity), IsNaN());
+ EXPECT_EQ(-kInfinity, log1p(-1.0));
+ EXPECT_EQ(0.0, log1p(0.0));
+ EXPECT_EQ(-0.0, log1p(-0.0));
+ EXPECT_EQ(kInfinity, log1p(kInfinity));
+ EXPECT_EQ(6.9756137364252422e-03, log1p(0.007));
+ EXPECT_EQ(709.782712893384, log1p(1.7976931348623157e308));
+ EXPECT_EQ(2.7755575615628914e-17, log1p(2.7755575615628914e-17));
+ EXPECT_EQ(9.313225741817976e-10, log1p(9.313225746154785e-10));
+ EXPECT_EQ(-0.2876820724517809, log1p(-0.25));
+ EXPECT_EQ(0.22314355131420976, log1p(0.25));
+ EXPECT_EQ(2.3978952727983707, log1p(10));
+ EXPECT_EQ(36.841361487904734, log1p(10e15));
+ EXPECT_EQ(37.08337388996168, log1p(12738099905822720));
+ EXPECT_EQ(37.08336444902049, log1p(12737979646738432));
+ EXPECT_EQ(1.3862943611198906, log1p(3));
+ EXPECT_EQ(1.3862945995384413, log1p(3 + 9.5367431640625e-7));
+ EXPECT_EQ(0.5596157879354227, log1p(0.75));
+ EXPECT_EQ(0.8109302162163288, log1p(1.25));
+}
+
+TEST(Ieee754, Log2) {
+ EXPECT_THAT(log2(kQNaN), IsNaN());
+ EXPECT_THAT(log2(kSNaN), IsNaN());
+ EXPECT_THAT(log2(-kInfinity), IsNaN());
+ EXPECT_THAT(log2(-1.0), IsNaN());
+ EXPECT_EQ(-kInfinity, log2(0.0));
+ EXPECT_EQ(-kInfinity, log2(-0.0));
+ EXPECT_EQ(kInfinity, log2(kInfinity));
+}
+
+TEST(Ieee754, Log10) {
+ EXPECT_THAT(log10(kQNaN), IsNaN());
+ EXPECT_THAT(log10(kSNaN), IsNaN());
+ EXPECT_THAT(log10(-kInfinity), IsNaN());
+ EXPECT_THAT(log10(-1.0), IsNaN());
+ EXPECT_EQ(-kInfinity, log10(0.0));
+ EXPECT_EQ(-kInfinity, log10(-0.0));
+ EXPECT_EQ(kInfinity, log10(kInfinity));
+ EXPECT_EQ(3.0, log10(1000.0));
+ EXPECT_EQ(14.0, log10(100000000000000)); // log10(10 ^ 14)
+ EXPECT_EQ(3.7389561269540406, log10(5482.2158));
+ EXPECT_EQ(14.661551142893833, log10(458723662312872.125782332587));
+ EXPECT_EQ(-0.9083828622192334, log10(0.12348583358871));
+ EXPECT_EQ(5.0, log10(100000.0));
+}
+
+TEST(Ieee754, Cbrt) {
+ EXPECT_THAT(cbrt(kQNaN), IsNaN());
+ EXPECT_THAT(cbrt(kSNaN), IsNaN());
+ EXPECT_EQ(kInfinity, cbrt(kInfinity));
+ EXPECT_EQ(-kInfinity, cbrt(-kInfinity));
+ EXPECT_EQ(1.4422495703074083, cbrt(3));
+ EXPECT_EQ(100, cbrt(100 * 100 * 100));
+ EXPECT_EQ(46.415888336127786, cbrt(100000));
+}
+
+TEST(Ieee754, Sin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(sin(kQNaN), IsNaN());
+ EXPECT_THAT(sin(kSNaN), IsNaN());
+ EXPECT_THAT(sin(kInfinity), IsNaN());
+ EXPECT_THAT(sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, 1 / sin(-0.0));
+ EXPECT_EQ(kInfinity, 1 / sin(0.0));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, sin(0.5));
+ EXPECT_EQ(-0.479425538604203, sin(-0.5));
+ EXPECT_EQ(1, sin(kPI / 2.0));
+ EXPECT_EQ(-1, sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
+}
+
+TEST(Ieee754, Sinh) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(sinh(kQNaN), IsNaN());
+ EXPECT_THAT(sinh(kSNaN), IsNaN());
+ EXPECT_THAT(sinh(kInfinity), kInfinity);
+ EXPECT_THAT(sinh(-kInfinity), -kInfinity);
+ EXPECT_EQ(0.0, sinh(0.0));
+ EXPECT_EQ(-0.0, sinh(-0.0));
+}
+
+TEST(Ieee754, Tan) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(tan(kQNaN), IsNaN());
+ EXPECT_THAT(tan(kSNaN), IsNaN());
+ EXPECT_THAT(tan(kInfinity), IsNaN());
+ EXPECT_THAT(tan(-kInfinity), IsNaN());
+
+ // Tests for tan for |x| < pi/4
+ EXPECT_EQ(kInfinity, 1 / tan(0.0));
+ EXPECT_EQ(-kInfinity, 1 / tan(-0.0));
+ // tan(x) = x for |x| < 2^-28
+ EXPECT_EQ(2.3283064365386963e-10, tan(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, tan(-2.3283064365386963e-10));
+ // Test KERNELTAN for |x| > 0.67434.
+ EXPECT_EQ(0.8211418015898941, tan(11.0 / 16.0));
+ EXPECT_EQ(-0.8211418015898941, tan(-11.0 / 16.0));
+ EXPECT_EQ(0.41421356237309503, tan(0.39269908169872414));
+ // crbug/427468
+ EXPECT_EQ(0.7993357819992383, tan(0.6743358));
+
+ // Tests for tan.
+ EXPECT_EQ(3.725290298461914e-9, tan(3.725290298461914e-9));
+ // Test that tan(PI/2) != Infinity since PI is not exact.
+ EXPECT_EQ(1.633123935319537e16, tan(kPI / 2));
+ // Cover different code paths in KERNELTAN (tangent and cotangent)
+ EXPECT_EQ(0.5463024898437905, tan(0.5));
+ EXPECT_EQ(2.0000000000000027, tan(1.107148717794091));
+ EXPECT_EQ(-1.0000000000000004, tan(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.9999999999999994, tan(9.0 / 4.0 * kPI));
+ EXPECT_EQ(-6.420676210313675e-11, tan(1048576.0 / 2.0 * kPI));
+ EXPECT_EQ(2.910566692924059e11, tan(1048575.0 / 2.0 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(-0.40806638884180424e0, tan(kTwo120));
+ EXPECT_EQ(0.40806638884180424e0, tan(-kTwo120));
+}
+
+TEST(Ieee754, Tanh) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(tanh(kQNaN), IsNaN());
+ EXPECT_THAT(tanh(kSNaN), IsNaN());
+ EXPECT_THAT(tanh(kInfinity), 1);
+ EXPECT_THAT(tanh(-kInfinity), -1);
+ EXPECT_EQ(0.0, tanh(0.0));
+ EXPECT_EQ(-0.0, tanh(-0.0));
+}
+
+} // namespace ieee754
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index b3bfbab319..8b81eb90d6 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -15,7 +15,10 @@
#include "src/base/win32-headers.h"
#endif
+#include <vector>
+
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/platform.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -182,5 +185,77 @@ TEST(TimeTicks, IsMonotonic) {
}
}
+
+#if V8_OS_ANDROID
+#define MAYBE_ThreadNow DISABLED_ThreadNow
+#else
+#define MAYBE_ThreadNow ThreadNow
+#endif
+TEST(ThreadTicks, MAYBE_ThreadNow) {
+ if (ThreadTicks::IsSupported()) {
+ ThreadTicks::WaitUntilInitialized();
+ TimeTicks begin = TimeTicks::Now();
+ ThreadTicks begin_thread = ThreadTicks::Now();
+ // Make sure that ThreadNow value is non-zero.
+ EXPECT_GT(begin_thread, ThreadTicks());
+ // Sleep for 10 milliseconds to get the thread de-scheduled.
+ OS::Sleep(base::TimeDelta::FromMilliseconds(10));
+ ThreadTicks end_thread = ThreadTicks::Now();
+ TimeTicks end = TimeTicks::Now();
+ TimeDelta delta = end - begin;
+ TimeDelta delta_thread = end_thread - begin_thread;
+ // Make sure that some thread time have elapsed.
+ EXPECT_GT(delta_thread.InMicroseconds(), 0);
+ // But the thread time is at least 9ms less than clock time.
+ TimeDelta difference = delta - delta_thread;
+ EXPECT_GE(difference.InMicroseconds(), 9000);
+ }
+}
+
+
+#if V8_OS_WIN
+TEST(TimeTicks, TimerPerformance) {
+ // Verify that various timer mechanisms can always complete quickly.
+ // Note: This is a somewhat arbitrary test.
+ const int kLoops = 10000;
+
+ typedef TimeTicks (*TestFunc)();
+ struct TestCase {
+ TestFunc func;
+ const char *description;
+ };
+ // Cheating a bit here: assumes sizeof(TimeTicks) == sizeof(Time)
+ // in order to create a single test case list.
+ static_assert(sizeof(TimeTicks) == sizeof(Time),
+ "TimeTicks and Time must be the same size");
+ std::vector<TestCase> cases;
+ cases.push_back({reinterpret_cast<TestFunc>(&Time::Now), "Time::Now"});
+ cases.push_back({&TimeTicks::Now, "TimeTicks::Now"});
+
+ if (ThreadTicks::IsSupported()) {
+ ThreadTicks::WaitUntilInitialized();
+ cases.push_back(
+ {reinterpret_cast<TestFunc>(&ThreadTicks::Now), "ThreadTicks::Now"});
+ }
+
+ for (const auto& test_case : cases) {
+ TimeTicks start = TimeTicks::Now();
+ for (int index = 0; index < kLoops; index++)
+ test_case.func();
+ TimeTicks stop = TimeTicks::Now();
+ // Turning off the check for acceptible delays. Without this check,
+ // the test really doesn't do much other than measure. But the
+ // measurements are still useful for testing timers on various platforms.
+ // The reason to remove the check is because the tests run on many
+ // buildbots, some of which are VMs. These machines can run horribly
+ // slow, and there is really no value for checking against a max timer.
+ // const int kMaxTime = 35; // Maximum acceptible milliseconds for test.
+ // EXPECT_LT((stop - start).InMilliseconds(), kMaxTime);
+ printf("%s: %1.2fus per call\n", test_case.description,
+ (stop - start).InMillisecondsF() * 1000 / kLoops);
+ }
+}
+#endif // V8_OS_WIN
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/sys-info-unittest.cc b/deps/v8/test/unittests/base/sys-info-unittest.cc
index a760f941f6..a97c08c91c 100644
--- a/deps/v8/test/unittests/base/sys-info-unittest.cc
+++ b/deps/v8/test/unittests/base/sys-info-unittest.cc
@@ -5,12 +5,6 @@
#include "src/base/sys-info.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if V8_OS_NACL
-#define DISABLE_ON_NACL(Name) DISABLED_##Name
-#else
-#define DISABLE_ON_NACL(Name) Name
-#endif
-
namespace v8 {
namespace base {
@@ -18,8 +12,7 @@ TEST(SysInfoTest, NumberOfProcessors) {
EXPECT_LT(0, SysInfo::NumberOfProcessors());
}
-
-TEST(SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory)) {
+TEST(SysInfoTest, AmountOfPhysicalMemory) {
EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
}
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
new file mode 100644
index 0000000000..922ed2f44e
--- /dev/null
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
@@ -0,0 +1,156 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "include/v8.h"
+#include "src/api.h"
+#include "src/ast/scopes.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
+#include "src/flags.h"
+#include "src/isolate-inl.h"
+#include "src/parsing/parse-info.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+typedef TestWithContext CompilerDispatcherJobTest;
+
+namespace {
+
+const char test_script[] = "(x) { x*x; }";
+
+class ScriptResource : public v8::String::ExternalOneByteStringResource {
+ public:
+ ScriptResource(const char* data, size_t length)
+ : data_(data), length_(length) {}
+ ~ScriptResource() override = default;
+
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
+
+ private:
+ const char* data_;
+ size_t length_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptResource);
+};
+
+Handle<JSFunction> CreateFunction(
+ Isolate* isolate, ExternalOneByteString::Resource* maybe_resource) {
+ HandleScope scope(isolate);
+ Handle<String> source;
+ if (maybe_resource) {
+ source = isolate->factory()
+ ->NewExternalStringFromOneByte(maybe_resource)
+ .ToHandleChecked();
+ } else {
+ source = isolate->factory()->NewStringFromAsciiChecked(test_script);
+ }
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
+ isolate->factory()->NewStringFromAsciiChecked("f"), MaybeHandle<Code>(),
+ false);
+ SharedFunctionInfo::SetScript(shared, script);
+ shared->set_end_position(source->length());
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, handle(isolate->context(), isolate));
+ return scope.CloseAndEscape(function);
+}
+
+} // namespace
+
+TEST_F(CompilerDispatcherJobTest, Construct) {
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+}
+
+TEST_F(CompilerDispatcherJobTest, CanParseOnBackgroundThread) {
+ {
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+ ASSERT_FALSE(job->can_parse_on_background_thread());
+ }
+ {
+ ScriptResource script(test_script, strlen(test_script));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), &script), FLAG_stack_size));
+ ASSERT_TRUE(job->can_parse_on_background_thread());
+ }
+}
+
+TEST_F(CompilerDispatcherJobTest, StateTransitions) {
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+ job->PrepareToParseOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToParse);
+ job->Parse();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kParsed);
+ ASSERT_TRUE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToCompile);
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+TEST_F(CompilerDispatcherJobTest, SyntaxError) {
+ ScriptResource script("^^^", strlen("^^^"));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), &script), FLAG_stack_size));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ ASSERT_FALSE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kFailed);
+ ASSERT_TRUE(i_isolate()->has_pending_exception());
+
+ i_isolate()->clear_pending_exception();
+
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+TEST_F(CompilerDispatcherJobTest, ScopeChain) {
+ const char script[] =
+ "function g() { var g = 1; function f(x) { return x * g }; return f; } "
+ "g();";
+ Handle<JSFunction> f = Handle<JSFunction>::cast(Utils::OpenHandle(
+ *v8::Script::Compile(isolate()->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate(), script,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked()
+ ->Run(isolate()->GetCurrentContext())
+ .ToLocalChecked()));
+
+ std::unique_ptr<CompilerDispatcherJob> job(
+ new CompilerDispatcherJob(i_isolate(), f, FLAG_stack_size));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ ASSERT_TRUE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToCompile);
+
+ const AstRawString* var_x =
+ job->parse_info_->ast_value_factory()->GetOneByteString("x");
+ Variable* var = job->parse_info_->literal()->scope()->Lookup(var_x);
+ ASSERT_TRUE(var);
+ ASSERT_TRUE(var->IsUnallocated());
+
+ const AstRawString* var_g =
+ job->parse_info_->ast_value_factory()->GetOneByteString("g");
+ var = job->parse_info_->literal()->scope()->Lookup(var_g);
+ ASSERT_TRUE(var);
+ ASSERT_TRUE(var->IsContextSlot());
+
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 72cfc51d58..6317d91fa9 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1392,8 +1392,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -1403,6 +1403,39 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+TEST_F(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
+ TRACED_FORRANGE(int, immediate_shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Return(m.Load(MachineType::Int32(), m.Parameter(0), index));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
+ TRACED_FORRANGE(int, immediate_shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Store(MachineRepresentation::kWord32, m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmStr, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
// -----------------------------------------------------------------------------
// Conversions.
@@ -1866,36 +1899,6 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
}
-TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
@@ -1971,6 +1974,250 @@ TEST_F(InstructionSelectorTest, Float64Sqrt) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
+// -----------------------------------------------------------------------------
+// Flag-setting instructions.
+
+const Comparison kBinopCmpZeroRightInstructions[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual,
+ kEqual},
+ {&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kNotEqual, kEqual,
+ kNotEqual},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kNegative,
+ kPositiveOrZero, kNegative},
+ {&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kPositiveOrZero, kNegative, kPositiveOrZero},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kEqual, kNotEqual, kEqual},
+ {&RawMachineAssembler::Uint32GreaterThan, "Uint32GreaterThan", kNotEqual,
+ kEqual, kNotEqual}};
+
+const Comparison kBinopCmpZeroLeftInstructions[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual,
+ kEqual},
+ {&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kNotEqual, kEqual,
+ kNotEqual},
+ {&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kNegative,
+ kPositiveOrZero, kNegative},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kPositiveOrZero, kNegative, kPositiveOrZero},
+ {&RawMachineAssembler::Uint32GreaterThanOrEqual, "Uint32GreaterThanOrEqual",
+ kEqual, kNotEqual, kEqual},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kNotEqual, kEqual,
+ kNotEqual}};
+
+struct FlagSettingInst {
+ Constructor constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ ArchOpcode no_output_opcode;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagSettingInst& inst) {
+ return os << inst.constructor_name;
+}
+
+const FlagSettingInst kFlagSettingInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmCmn},
+ {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmTst},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmTeq}};
+
+typedef InstructionSelectorTestWithParam<FlagSettingInst>
+ InstructionSelectorFlagSettingTest;
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with single user : a cmp instruction.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroLeft) {
+ const FlagSettingInst inst = GetParam();
+ // Test a cmp with zero on the left-hand side.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroLeftInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(m.Int32Constant(0), binop);
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroOnlyUserInBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, but in a different basic block.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ // We don't optimise this case at the moment.
+ if (cmp.flags_condition == kEqual || cmp.flags_condition == kNotEqual) {
+ continue;
+ }
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
+ const FlagSettingInst inst = GetParam();
+ // Like the test above, but with a shifted input to the binary operator.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ // We don't optimise this case at the moment.
+ if (cmp.flags_condition == kEqual || cmp.flags_condition == kNotEqual) {
+ continue;
+ }
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* imm = m.Int32Constant(5);
+ Node* shift = m.Word32Shl(m.Parameter(1), imm);
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), shift);
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(5U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(5, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, UsersInSameBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, in the same basic block. We need to make sure
+ // we don't try to optimise this case.
+ TRACED_FOREACH(Comparison, cmp, kComparisons) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Int32Mul(m.Parameter(0), binop);
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(mul);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+ EXPECT_EQ(cmp.flags_condition == kEqual ? kArmTst : kArmCmp,
+ s[2]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[2]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[2]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteImmediate) {
+ const FlagSettingInst inst = GetParam();
+ // Immediate on left hand side of the binary operator.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* imm = m.Int32Constant(3);
+ Node* binop = (m.*inst.constructor)(imm, m.Parameter(0));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(3, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteShift) {
+ const FlagSettingInst inst = GetParam();
+ // Left-hand side operand shifted by immediate.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* imm = m.Int32Constant(5);
+ Node* shifted_operand = (m.*shift.constructor)(m.Parameter(0), imm);
+ Node* binop = (m.*inst.constructor)(shifted_operand, m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Return(comp);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(5, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(inst.arch_opcode == kArmOrr ? 2U : 1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorFlagSettingTest,
+ ::testing::ValuesIn(kFlagSettingInstructions));
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -2228,7 +2475,7 @@ TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
- Stream s = m.Build(MLS);
+ Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmMls, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2324,7 +2571,7 @@ TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build(MLS, SUDIV);
+ Stream s = m.Build(ARMv7, SUDIV);
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -2530,7 +2777,7 @@ TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build(MLS, SUDIV);
+ Stream s = m.Build(ARMv7, SUDIV);
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -2954,6 +3201,70 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build(ARMv8);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmFloat64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build(ARMv8);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmFloat64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float32Neg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float32Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float32Neg(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Neg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float64Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float64Neg(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 73532aab2a..6ca5e5e684 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -184,8 +184,11 @@ const MachInst2 kOvfAddSubInstructions[] = {
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
kArm64Add32, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kArm64Sub32, MachineType::Int32()}};
-
+ kArm64Sub32, MachineType::Int32()},
+ {&RawMachineAssembler::Int64AddWithOverflow, "Int64AddWithOverflow",
+ kArm64Add, MachineType::Int64()},
+ {&RawMachineAssembler::Int64SubWithOverflow, "Int64SubWithOverflow",
+ kArm64Sub, MachineType::Int64()}};
// ARM64 shift instructions.
const Shift kShiftInstructions[] = {
@@ -311,7 +314,7 @@ const Conversion kConversionInstructions[] = {
kArm64Mov32, MachineType::Uint64()},
MachineType::Uint32()},
{{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
- kArm64Mov32, MachineType::Int32()},
+ kArchNop, MachineType::Int32()},
MachineType::Int64()},
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
kArm64Int32ToFloat64, MachineType::Float64()},
@@ -326,6 +329,52 @@ const Conversion kConversionInstructions[] = {
kArm64Float64ToUint32, MachineType::Uint32()},
MachineType::Float64()}};
+// ARM64 instructions that clear the top 32 bits of the destination.
+const MachInst2 kCanElideChangeUint32ToUint64[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+ kArm64Add32, MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+ kArm64Sub32, MachineType::Int32()},
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kArm64Imod32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kArm64Umod32,
+ MachineType::Uint32()},
+};
+
} // namespace
@@ -1132,7 +1181,6 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
}
}
-
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
@@ -1215,6 +1263,91 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
}
}
+TEST_F(InstructionSelectorTest, Word32EqualZeroAndBranchWithOneBitMask) {
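+  // A single-bit mask tested against zero should be selected as a
+  // kArm64TestAndBranch32 (tbz/tbnz) instruction, with the bit index encoded
+  // as an immediate operand.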
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(mask), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ m.Branch(
+ m.Word32NotEqual(m.Word32And(m.Int32Constant(mask), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64EqualZeroAndBranchWithOneBitMask) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = V8_UINT64_C(1) << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(m.Word64Equal(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
+ m.Int64Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = V8_UINT64_C(1) << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(
+ m.Word64NotEqual(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
+ m.Int64Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+}
TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
{
@@ -1252,6 +1385,75 @@ TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
}
}
+TEST_F(InstructionSelectorTest, EqualZeroAndBranch) {
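+  // Comparing a value directly against zero should be selected as a single
+  // compare-and-branch (cbz/cbnz) instruction rather than a separate cmp
+  // followed by a branch.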
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word32NotEqual(p0, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word64Equal(p0, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word64NotEqual(p0, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+}
// -----------------------------------------------------------------------------
// Add and subtract instructions with overflow.
@@ -1407,6 +1609,29 @@ TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
}
}
+TEST_P(InstructionSelectorOvfAddSubTest, RORShift) {
+  // ADD and SUB do not support ROR shifts; make sure we do not try to merge
+  // them into the ADD/SUB instruction.
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ auto rotate = &RawMachineAssembler::Word64Ror;
+ ArchOpcode rotate_opcode = kArm64Ror;
+ if (type == MachineType::Int32()) {
+ rotate = &RawMachineAssembler::Word32Ror;
+ rotate_opcode = kArm64Ror32;
+ }
+ TRACED_FORRANGE(int32_t, imm, -32, 63) {
+ StreamBuilder m(this, type, type, type);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = (m.*rotate)(p1, m.Int32Constant(imm));
+ m.Return((m.*dpi.constructor)(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(rotate_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(dpi.arch_opcode, s[1]->arch_opcode());
+ }
+}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorOvfAddSubTest,
@@ -1574,12 +1799,11 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Asr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(32, s.ToInt64(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
@@ -1596,7 +1820,6 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
}
@@ -2094,6 +2317,10 @@ TEST_P(InstructionSelectorConversionTest, Parameter) {
StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
Stream s = m.Build();
+ if (conv.mi.arch_opcode == kArchNop) {
+ ASSERT_EQ(0U, s.size());
+ return;
+ }
ASSERT_EQ(1U, s.size());
EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
@@ -2105,6 +2332,154 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorElidedChangeUint32ToUint64Test;
+
+TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
+ const MachInst2 binop = GetParam();
+ StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
+ binop.machine_type);
+ m.Return(m.ChangeUint32ToUint64(
+ (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ // Make sure the `ChangeUint32ToUint64` node turned into a no-op.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
+ // For each case, make sure the `ChangeUint32ToUint64` node turned into a
+ // no-op.
+
+ // Ldrb
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // Ldrh
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // LdrW
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64LdrW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
+ // For each case, test that the conversion is merged into the load
+ // operation.
+ // ChangeInt32ToInt64(Load_Uint8) -> Ldrb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int8) -> Ldrsb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint16) -> Ldrh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int16) -> Ldrsh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint32) -> Ldrsw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int32) -> Ldrsw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
// -----------------------------------------------------------------------------
// Memory access instructions.
@@ -2244,12 +2619,131 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreZero) {
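+  // Storing the constant zero should encode the value as an immediate operand
+  // rather than requiring it in a register.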
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
+ const MemoryAccess memacc = GetParam();
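+  // The shift can only be folded into the memory operand when the shift
+  // amount equals the log2 of the element size.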
+ TRACED_FORRANGE(int, immediate_shift, 0, 4) {
+ // 32 bit shift
+ {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Return(m.Load(memacc.type, m.Parameter(0), index));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the load instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ // 64 bit shift
+ {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int64());
+ Node* const index =
+ m.Word64Shl(m.Parameter(1), m.Int64Constant(immediate_shift));
+ m.Return(m.Load(memacc.type, m.Parameter(0), index));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the load instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
+ const MemoryAccess memacc = GetParam();
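+  // As for loads, the shift is only folded into the memory operand when the
+  // shift amount equals the log2 of the element size.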
+ TRACED_FORRANGE(int, immediate_shift, 0, 4) {
+ // 32 bit shift
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Store(memacc.type.representation(), m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the store instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ // 64 bit shift
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int64(), memacc.type);
+ Node* const index =
+ m.Word64Shl(m.Parameter(1), m.Int64Constant(immediate_shift));
+ m.Store(memacc.type.representation(), m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the store instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ }
+}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
@@ -2616,6 +3110,7 @@ namespace {
struct IntegerCmp {
MachInst2 mi;
FlagsCondition cond;
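+  // Condition expected when the instruction selector has swapped the operands
+  // of the comparison.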
+ FlagsCondition commuted_cond;
};
@@ -2628,19 +3123,24 @@ std::ostream& operator<<(std::ostream& os, const IntegerCmp& cmp) {
const IntegerCmp kIntegerCmpInstructions[] = {
{{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
MachineType::Int32()},
+ kEqual,
kEqual},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
MachineType::Int32()},
- kSignedLessThan},
+ kSignedLessThan,
+ kSignedGreaterThan},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kArm64Cmp32, MachineType::Int32()},
- kSignedLessThanOrEqual},
+ kSignedLessThanOrEqual,
+ kSignedGreaterThanOrEqual},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
MachineType::Uint32()},
- kUnsignedLessThan},
+ kUnsignedLessThan,
+ kUnsignedGreaterThan},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
kArm64Cmp32, MachineType::Uint32()},
- kUnsignedLessThanOrEqual}};
+ kUnsignedLessThanOrEqual,
+ kUnsignedGreaterThanOrEqual}};
} // namespace
@@ -2677,6 +3177,473 @@ TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
}
}
+TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
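+  // With the immediate on the left-hand side, the instruction selector swaps
+  // the operands, so the commuted condition is expected.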
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
+ // is tested elsewhere.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), p0), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_LE(2U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnWithImmediateOnLeft) {
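+  // Comparing an immediate against a negated value (0 - x) should be selected
+  // as cmn with the immediate, keeping the original condition.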
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
+ // is tested elsewhere.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), sub), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ ASSERT_LE(2U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmpSignedExtendByteOnLeft) {
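+  // The shl/sar pair by 24 bits is matched as a sign-extend-byte operand
+  // (SXTB); with the extended operand on the left, the condition is commuted.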
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ m.Return((m.*cmp.mi.constructor)(extend, m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnSignedExtendByteOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ m.Return((m.*cmp.mi.constructor)(extend, sub));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return((m.*cmp.mi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ // Cmp does not support ROR shifts.
+ if (shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ m.Return((m.*cmp.mi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ sub));
+ Stream s = m.Build();
+ // Cmn does not support ROR shifts.
+ if (shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Flag-setting ADD and AND instructions.
+
+const IntegerCmp kBinopCmpZeroRightInstructions[] = {
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNegative,
+ kNegative},
+ {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kPositiveOrZero,
+ kPositiveOrZero},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Uint32GreaterThan, "Uint32GreaterThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual}};
+
+const IntegerCmp kBinopCmpZeroLeftInstructions[] = {
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNegative,
+ kNegative},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kPositiveOrZero,
+ kPositiveOrZero},
+ {{&RawMachineAssembler::Uint32GreaterThanOrEqual,
+ "Uint32GreaterThanOrEqual", kArm64Cmp32, MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual}};
+
+struct FlagSettingInst {
+ MachInst2 mi;
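+  // Opcode expected when only the flags are used and the result value is
+  // discarded (e.g. cmn for add, tst for and).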
+ ArchOpcode no_output_opcode;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagSettingInst& inst) {
+ return os << inst.mi.constructor_name;
+}
+
+const FlagSettingInst kFlagSettingInstructions[] = {
+ {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32,
+ MachineType::Int32()},
+ kArm64Cmn32},
+ {{&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
+ MachineType::Int32()},
+ kArm64Tst32}};
+
+typedef InstructionSelectorTestWithParam<FlagSettingInst>
+ InstructionSelectorFlagSettingTest;
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
+ const FlagSettingInst inst = GetParam();
+  // Binop with a single user: a comparison against zero.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroLeft) {
+ const FlagSettingInst inst = GetParam();
+ // Test a cmp with zero on the left-hand side.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroLeftInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(m.Int32Constant(0), binop);
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroOnlyUserInBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, but in a different basic block.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ // For kEqual and kNotEqual, we generate a cbz or cbnz.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
+ const FlagSettingInst inst = GetParam();
+ // Like the test above, but with a shifted input to the binary operator.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ // For kEqual and kNotEqual, we generate a cbz or cbnz.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* imm = m.Int32Constant(5);
+ Node* shift = m.Word32Shl(m.Parameter(1), imm);
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), shift);
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(5U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(5, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, UsersInSameBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, in the same basic block. We need to make sure
+ // we don't try to optimise this case.
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Int32Mul(m.Parameter(0), binop);
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(mul);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kArm64Mul32, s[1]->arch_opcode());
+ EXPECT_EQ(cmp.cond == kEqual ? kArm64CompareAndBranch32 : kArm64Cmp32,
+ s[2]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[2]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[2]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteImmediate) {
+ const FlagSettingInst inst = GetParam();
+  // Immediate on the left-hand side of the binary operator.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ // 3 can be an immediate on both arithmetic and logical instructions.
+ Node* imm = m.Int32Constant(3);
+ Node* binop = (m.*inst.mi.constructor)(imm, m.Parameter(0));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(3, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteShift) {
+ const FlagSettingInst inst = GetParam();
+ // Left-hand side operand shifted by immediate.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* imm = m.Int32Constant(5);
+ Node* shifted_operand = (m.*shift.mi.constructor)(m.Parameter(0), imm);
+ Node* binop = (m.*inst.mi.constructor)(shifted_operand, m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Return(comp);
+ Stream s = m.Build();
+ // Cmn does not support ROR shifts.
+ if (inst.no_output_opcode == kArm64Cmn32 &&
+ shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(5, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorFlagSettingTest,
+ ::testing::ValuesIn(kFlagSettingInstructions));
+
+TEST_F(InstructionSelectorTest, TstInvalidImmediate) {
+ // Make sure we do not generate an invalid immediate for TST.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ // 5 is not a valid constant for TST.
+ Node* imm = m.Int32Constant(5);
+ Node* binop = m.Word32And(imm, m.Parameter(0));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_NE(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_NE(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CommuteAddsExtend) {
+ // Extended left-hand side operand.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ Node* binop = m.Int32Add(extend, m.Parameter(1));
+ m.Return((m.*cmp.mi.constructor)(binop, m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
// -----------------------------------------------------------------------------
// Miscellaneous
@@ -3254,32 +4221,16 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Max(p0, p1);
+ Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float32Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float32Max, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Float64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
@@ -3288,17 +4239,16 @@ TEST_F(InstructionSelectorTest, Float32Max) {
}
-TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Min(p0, p1);
+ Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float32Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float32Min, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Float64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
@@ -3306,44 +4256,61 @@ TEST_F(InstructionSelectorTest, Float32Min) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-
-TEST_F(InstructionSelectorTest, Float64Max) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
- MachineType::Float64());
+TEST_F(InstructionSelectorTest, Float32Neg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float64Max(p0, p1);
+ // Don't use m.Float32Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float32Neg(), m.Parameter(0));
m.Return(n);
Stream s = m.Build();
- // Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Max, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Float32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-
-TEST_F(InstructionSelectorTest, Float64Min) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
- MachineType::Float64());
+TEST_F(InstructionSelectorTest, Float64Neg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float64Min(p0, p1);
+ // Don't use m.Float64Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float64Neg(), m.Parameter(0));
m.Return(n);
Stream s = m.Build();
- // Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Min, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
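+  // A 64-bit load whose only use is an arithmetic shift right by 32 should be
+  // selected as a 32-bit sign-extending load (ldrsw) of the upper word, with
+  // the immediate offset adjusted accordingly.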
+ {
+ int32_t immediates[] = {-256, -255, -3, -2, -1, 0, 1,
+ 2, 3, 255, 256, 260, 4096, 4100,
+ 8192, 8196, 3276, 3280, 16376, 16380};
+ TRACED_FOREACH(int32_t, index, immediates) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer());
+ Node* const load = m.Load(MachineType::Uint64(), m.Parameter(0),
+ m.Int32Constant(index - 4));
+ Node* const sar = m.Word64Sar(load, m.Int32Constant(32));
+ // Make sure we don't fold the shift into the following add:
+ m.Return(m.Int64Add(sar, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
deleted file mode 100644
index fd0766caba..0000000000
--- a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
+++ /dev/null
@@ -1,628 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/code-stubs.h"
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "test/unittests/compiler/compiler-test-utils.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-#include "testing/gmock-support.h"
-
-using testing::_;
-using testing::AllOf;
-using testing::BitEq;
-using testing::Capture;
-using testing::CaptureEq;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class ChangeLoweringTest : public TypedGraphTest {
- public:
- ChangeLoweringTest() : simplified_(zone()) {}
-
- virtual MachineRepresentation WordRepresentation() const = 0;
-
- protected:
- bool Is32() const {
- return WordRepresentation() == MachineRepresentation::kWord32;
- }
- bool Is64() const {
- return WordRepresentation() == MachineRepresentation::kWord64;
- }
-
- Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine(zone(), WordRepresentation());
- JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
- &machine);
- ChangeLowering reducer(&jsgraph);
- return reducer.Reduce(node);
- }
-
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher) {
- return IsCall(
- _, IsHeapConstant(AllocateHeapNumberStub(isolate()).GetCode()),
- IsNumberConstant(BitEq(0.0)), effect_matcher, control_matcher);
- }
- Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
- return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
- IsSmiShiftBitsConstant())
- : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
- }
- Matcher<Node*> IsChangeSmiToInt32(const Matcher<Node*>& value_matcher) {
- return Is64() ? IsTruncateInt64ToInt32(
- IsWord64Sar(value_matcher, IsSmiShiftBitsConstant()))
- : IsWord32Sar(value_matcher, IsSmiShiftBitsConstant());
- }
- Matcher<Node*> IsChangeUint32ToSmi(const Matcher<Node*>& value_matcher) {
- return Is64() ? IsWord64Shl(IsChangeUint32ToUint64(value_matcher),
- IsSmiShiftBitsConstant())
- : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
- }
- Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher) {
- return IsLoad(MachineType::Float64(), value_matcher,
- IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
- graph()->start(), control_matcher);
- }
- Matcher<Node*> IsIntPtrConstant(int value) {
- return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
- }
- Matcher<Node*> IsSmiShiftBitsConstant() {
- return IsIntPtrConstant(kSmiShiftSize + kSmiTagSize);
- }
- Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
- : IsWord64Equal(lhs_matcher, rhs_matcher);
- }
-
- private:
- SimplifiedOperatorBuilder simplified_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Common.
-
-
-class ChangeLoweringCommonTest
- : public ChangeLoweringTest,
- public ::testing::WithParamInterface<MachineRepresentation> {
- public:
- ~ChangeLoweringCommonTest() override {}
-
- MachineRepresentation WordRepresentation() const final { return GetParam(); }
-};
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
- Node* value = Parameter(Type::Boolean());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeBitToBool(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kTagged, value,
- IsTrueConstant(), IsFalseConstant()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
- Node* value = Parameter(Type::Number());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsWordEqual(value, IsTrueConstant()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeInt32ToTaggedWithSignedSmall) {
- Node* value = Parameter(Type::SignedSmall());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeInt32ToTagged(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeUint32ToTaggedWithUnsignedSmall) {
- Node* value = Parameter(Type::UnsignedSmall());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeUint32ToSmi(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedSigned) {
- Node* value = Parameter(Type::TaggedSigned());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedPointer) {
- Node* value = Parameter(Type::TaggedPointer());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeFloat64ToInt32(
- IsLoadHeapNumber(value, graph()->start())));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedSigned) {
- Node* value = Parameter(Type::TaggedSigned());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedPointer) {
- Node* value = Parameter(Type::TaggedPointer());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeFloat64ToUint32(
- IsLoadHeapNumber(value, graph()->start())));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldSmi) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::TaggedSigned());
- Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- p0, IsIntPtrConstant(access.offset - access.tag()), p1,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldTagged) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Tagged());
- Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier),
- p0, IsIntPtrConstant(access.offset - access.tag()), p1,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadField) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* load = graph()->NewNode(simplified()->LoadField(access), p0,
- graph()->start(), graph()->start());
- Reduction r = Reduce(load);
-
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match = IsIntPtrConstant(access.offset - access.tag());
- EXPECT_THAT(r.replacement(),
- IsLoad(MachineType::AnyTagged(), p0,
- IsIntPtrConstant(access.offset - access.tag()),
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementTagged) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* p2 = Parameter(Type::Tagged());
- Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
- IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier),
- p0, index_match, p2, graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementUint8) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Signed32(), MachineType::Uint8()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* p2 = Parameter(Type::Signed32());
- Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kWord8,
- kNoWriteBarrier),
- p0, index_match, p2, graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementTagged) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(load);
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
- IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(), IsLoad(MachineType::AnyTagged(), p0, index_match,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementInt8) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Signed32(), MachineType::Int8()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(load);
-
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(), IsLoad(MachineType::Int8(), p0, index_match,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, Allocate) {
- Node* p0 = Parameter(Type::Signed32());
- Node* alloc = graph()->NewNode(simplified()->Allocate(TENURED), p0,
- graph()->start(), graph()->start());
- Reduction r = Reduce(alloc);
-
- // Only check that we lowered, but do not specify the exact form since
- // this is subject to change.
- ASSERT_TRUE(r.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
- ::testing::Values(MachineRepresentation::kWord32,
- MachineRepresentation::kWord64));
-
-
-// -----------------------------------------------------------------------------
-// 32-bit
-
-
-class ChangeLowering32Test : public ChangeLoweringTest {
- public:
- ~ChangeLowering32Test() override {}
- MachineRepresentation WordRepresentation() const final {
- return MachineRepresentation::kWord32;
- }
-};
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
- Node* value = Parameter(Type::Integral32());
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> add, branch, heap_number, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(MachineRepresentation::kTagged,
- IsFinishRegion(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_true))),
- IsStore(
- StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
- IsChangeInt32ToFloat64(value), CaptureEq(&heap_number),
- CaptureEq(&if_true))),
- IsProjection(0, AllOf(CaptureEq(&add),
- IsInt32AddWithOverflow(value, value))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(CaptureEq(&branch),
- IsBranch(IsProjection(1, CaptureEq(&add)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(MachineRepresentation::kFloat64,
- IsLoadHeapNumber(value, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(IsWord32Sar(
- value, IsInt32Constant(kSmiTagSize + kSmiShiftSize))),
- IsMerge(AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(
- value, IsInt32Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Signed32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Unsigned32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, heap_number, if_false;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kTagged,
- IsWord32Shl(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
- IsFinishRegion(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_false))),
- IsStore(
- StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsInt32Constant(HeapNumber::kValueOffset - kHeapObjectTag),
- IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
- CaptureEq(&if_false))),
- IsMerge(IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- value, IsInt32Constant(Smi::kMaxValue)),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
-// 64-bit
-
-
-class ChangeLowering64Test : public ChangeLoweringTest {
- public:
- ~ChangeLowering64Test() override {}
- MachineRepresentation WordRepresentation() const final {
- return MachineRepresentation::kWord64;
- }
-};
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
- Node* value = Parameter(Type::Signed32());
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(MachineRepresentation::kFloat64,
- IsLoadHeapNumber(value, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(IsWord64Sar(
- value, IsInt64Constant(kSmiTagSize + kSmiShiftSize)))),
- IsMerge(AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(
- value, IsInt64Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Signed32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Unsigned32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, heap_number, if_false;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kTagged,
- IsWord64Shl(IsChangeUint32ToUint64(value),
- IsInt64Constant(kSmiTagSize + kSmiShiftSize)),
- IsFinishRegion(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_false))),
- IsStore(
- StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsInt64Constant(HeapNumber::kValueOffset - kHeapObjectTag),
- IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
- CaptureEq(&if_false))),
- IsMerge(IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- value, IsInt32Constant(Smi::kMaxValue)),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
new file mode 100644
index 0000000000..a201fc9a55
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/checkpoint-elimination.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CheckpointEliminationTest : public GraphTest {
+ public:
+ CheckpointEliminationTest() : GraphTest() {}
+ ~CheckpointEliminationTest() override {}
+
+ protected:
+ Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
+ CheckpointElimination reducer(editor);
+ return reducer.Reduce(node);
+ }
+
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node);
+ }
+};
+
+namespace {
+
+const Operator kOpNoWrite(0, Operator::kNoWrite, "OpNoWrite", 0, 1, 0, 0, 1, 0);
+
+} // namespace
+
+// -----------------------------------------------------------------------------
+// Checkpoint
+
+TEST_F(CheckpointEliminationTest, CheckpointChain) {
+ Node* const control = graph()->start();
+ Node* frame_state = EmptyFrameState();
+ Node* checkpoint1 = graph()->NewNode(common()->Checkpoint(), frame_state,
+ graph()->start(), control);
+ Node* effect_link = graph()->NewNode(&kOpNoWrite, checkpoint1);
+ Node* checkpoint2 = graph()->NewNode(common()->Checkpoint(), frame_state,
+ effect_link, control);
+ Reduction r = Reduce(checkpoint2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(effect_link, r.replacement());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc b/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
deleted file mode 100644
index fe8fac4bfe..0000000000
--- a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "test/unittests/compiler/live-range-builder.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-class CoalescedLiveRangesTest : public TestWithZone {
- public:
- CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
- bool HasNoConflicts(const LiveRange* range);
- bool ConflictsPreciselyWith(const LiveRange* range, int id);
- bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
-
- CoalescedLiveRanges& ranges() { return ranges_; }
- const CoalescedLiveRanges& ranges() const { return ranges_; }
- bool AllocationsAreValid() const;
- void RemoveConflicts(LiveRange* range);
-
- private:
- typedef ZoneSet<int> LiveRangeIDs;
- bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
- CoalescedLiveRanges ranges_;
-};
-
-
-bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
- int id) {
- LiveRangeIDs set(zone());
- set.insert(id);
- return IsRangeConflictingWith(range, set);
-}
-
-
-bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
- int id1, int id2) {
- LiveRangeIDs set(zone());
- set.insert(id1);
- set.insert(id2);
- return IsRangeConflictingWith(range, set);
-}
-
-
-bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
- LiveRangeIDs set(zone());
- return IsRangeConflictingWith(range, set);
-}
-
-
-void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
- auto conflicts = ranges().GetConflicts(range);
- LiveRangeIDs seen(zone());
- for (auto c = conflicts.Current(); c != nullptr;
- c = conflicts.RemoveCurrentAndGetNext()) {
- int id = c->TopLevel()->vreg();
- EXPECT_FALSE(seen.count(id) > 0);
- seen.insert(c->TopLevel()->vreg());
- }
-}
-
-
-bool CoalescedLiveRangesTest::AllocationsAreValid() const {
- return ranges().VerifyAllocationsAreValidForTesting();
-}
-
-
-bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
- const LiveRangeIDs& ids) {
- LiveRangeIDs found_ids(zone());
-
- auto conflicts = ranges().GetConflicts(range);
- for (auto conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.GetNext()) {
- found_ids.insert(conflict->TopLevel()->vreg());
- }
- return found_ids == ids;
-}
-
-
-TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ASSERT_TRUE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- ASSERT_TRUE(HasNoConflicts(range));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
- ASSERT_TRUE(HasNoConflicts(query));
- query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- LiveRange* query =
- TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
- ASSERT_TRUE(HasNoConflicts(query));
- query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
- range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
- ranges().AllocateRange(range);
- ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
- range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
- ranges().AllocateRange(range);
- query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
- range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
- ranges().AllocateRange(range);
- query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
- ranges().AllocateRange(range);
- LiveRange* query =
- TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
- ranges().AllocateRange(range);
- LiveRange* query =
- TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index 1c163706f2..f51a54d074 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -105,40 +105,6 @@ TEST_F(CommonOperatorReducerTest, BranchWithInt32OneConstant) {
}
-TEST_F(CommonOperatorReducerTest, BranchWithInt64ZeroConstant) {
- TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- Node* const control = graph()->start();
- Node* const branch =
- graph()->NewNode(common()->Branch(hint), Int64Constant(0), control);
- Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Replace(if_true, IsDead()));
- EXPECT_CALL(editor, Replace(if_false, control));
- Reduction const r = Reduce(&editor, branch);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsDead());
- }
-}
-
-
-TEST_F(CommonOperatorReducerTest, BranchWithInt64OneConstant) {
- TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- Node* const control = graph()->start();
- Node* const branch =
- graph()->NewNode(common()->Branch(hint), Int64Constant(1), control);
- Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Replace(if_true, control));
- EXPECT_CALL(editor, Replace(if_false, IsDead()));
- Reduction const r = Reduce(&editor, branch);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsDead());
- }
-}
-
-
TEST_F(CommonOperatorReducerTest, BranchWithFalseConstant) {
TRACED_FOREACH(BranchHint, hint, kBranchHints) {
Node* const control = graph()->start();
@@ -358,78 +324,6 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
}
-TEST_F(CommonOperatorReducerTest, PhiToFloat32Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat32, 2), p1, p0, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, PhiToFloat64Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat64, 2), p1, p0, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, PhiToFloat32Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat32, 2), p0, p1, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
-}
-
-
-TEST_F(CommonOperatorReducerTest, PhiToFloat64Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat64, 2), p0, p1, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
-}
-
-
// -----------------------------------------------------------------------------
// Return
@@ -500,30 +394,6 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt32OneConstant) {
}
-TEST_F(CommonOperatorReducerTest, SelectWithInt64ZeroConstant) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* select =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- Int64Constant(0), p0, p1);
- Reduction r = Reduce(select);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(p1, r.replacement());
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectWithInt64OneConstant) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* select =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- Int64Constant(1), p0, p1);
- Reduction r = Reduce(select);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(p0, r.replacement());
-}
-
-
TEST_F(CommonOperatorReducerTest, SelectWithFalseConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
@@ -572,54 +442,6 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
}
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat32Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat32), check, p1, p0);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat64Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64), check, p1, p0);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat32Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat32), check, p0, p1);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat64Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64), check, p0, p1);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 0a55a2e2a2..787dae01dd 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -38,7 +38,6 @@ std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
return os << IrOpcode::Mnemonic(fop.opcode);
}
-
const SharedOperator kSharedOperators[] = {
#define SHARED(Name, properties, value_input_count, effect_input_count, \
control_input_count, value_output_count, effect_output_count, \
@@ -52,6 +51,7 @@ const SharedOperator kSharedOperators[] = {
SHARED(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
SHARED(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
SHARED(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1),
SHARED(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1),
SHARED(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)
#undef SHARED
@@ -220,24 +220,6 @@ TEST_F(CommonOperatorTest, Branch) {
}
-TEST_F(CommonOperatorTest, IfException) {
- static const IfExceptionHint kIfExceptionHints[] = {
- IfExceptionHint::kLocallyCaught, IfExceptionHint::kLocallyUncaught};
- TRACED_FOREACH(IfExceptionHint, hint, kIfExceptionHints) {
- const Operator* const op = common()->IfException(hint);
- EXPECT_EQ(IrOpcode::kIfException, op->opcode());
- EXPECT_EQ(Operator::kKontrol, op->properties());
- EXPECT_EQ(0, op->ValueInputCount());
- EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(1, op->ControlInputCount());
- EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(1, op->ValueOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(1, op->ControlOutputCount());
- }
-}
-
-
TEST_F(CommonOperatorTest, Switch) {
TRACED_FOREACH(size_t, cases, kCases) {
const Operator* const op = common()->Switch(cases);
@@ -362,15 +344,26 @@ TEST_F(CommonOperatorTest, NumberConstant) {
TEST_F(CommonOperatorTest, BeginRegion) {
- const Operator* op = common()->BeginRegion();
- EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ControlOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(0, op->ValueOutputCount());
+ {
+ const Operator* op =
+ common()->BeginRegion(RegionObservability::kObservable);
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
+ {
+ const Operator* op =
+ common()->BeginRegion(RegionObservability::kNotObservable);
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
}
-
TEST_F(CommonOperatorTest, FinishRegion) {
const Operator* op = common()->FinishRegion();
EXPECT_EQ(1, op->ValueInputCount());
@@ -381,6 +374,19 @@ TEST_F(CommonOperatorTest, FinishRegion) {
EXPECT_EQ(1, op->ValueOutputCount());
}
+TEST_F(CommonOperatorTest, Projection) {
+ TRACED_FORRANGE(size_t, index, 0, 3) {
+ const Operator* op = common()->Projection(index);
+ EXPECT_EQ(index, ProjectionIndexOf(op));
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index a5a3c74be2..a0c483344e 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -96,34 +96,6 @@ TEST_F(ControlFlowOptimizerTest, BuildSwitch2) {
IsSwitch(index, IsIfSuccess(index)))))));
}
-
-TEST_F(ControlFlowOptimizerTest, CloneBranch) {
- Node* cond0 = Parameter(0);
- Node* cond1 = Parameter(1);
- Node* cond2 = Parameter(2);
- Node* branch0 = graph()->NewNode(common()->Branch(), cond0, start());
- Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
- Node* phi0 = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2),
- cond1, cond2, merge0);
- Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
- Optimize();
- Capture<Node*> branch1_capture, branch2_capture;
- EXPECT_THAT(
- end(),
- IsEnd(IsMerge(IsMerge(IsIfTrue(CaptureEq(&branch1_capture)),
- IsIfTrue(CaptureEq(&branch2_capture))),
- IsMerge(IsIfFalse(AllOf(CaptureEq(&branch1_capture),
- IsBranch(cond1, control1))),
- IsIfFalse(AllOf(CaptureEq(&branch2_capture),
- IsBranch(cond2, control2)))))));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index df93f25302..d0351bf5f5 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -124,15 +124,11 @@ TEST_F(DeadCodeEliminationTest, IfSuccessWithDeadInput) {
TEST_F(DeadCodeEliminationTest, IfExceptionWithDeadControlInput) {
- IfExceptionHint const kHints[] = {IfExceptionHint::kLocallyCaught,
- IfExceptionHint::kLocallyUncaught};
- TRACED_FOREACH(IfExceptionHint, hint, kHints) {
- Reduction const r =
- Reduce(graph()->NewNode(common()->IfException(hint), graph()->start(),
- graph()->NewNode(common()->Dead())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsDead());
- }
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->IfException(), graph()->start(),
+ graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
}
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
new file mode 100644
index 0000000000..71a8696d09
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -0,0 +1,399 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/effect-control-linearizer.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using testing::Capture;
+
+class EffectControlLinearizerTest : public TypedGraphTest {
+ public:
+ EffectControlLinearizerTest()
+ : TypedGraphTest(3),
+ machine_(zone()),
+ javascript_(zone()),
+ simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
+ &machine_) {}
+
+ JSGraph* jsgraph() { return &jsgraph_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+ JSOperatorBuilder javascript_;
+ SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
+};
+
+namespace {
+
+BasicBlock* AddBlockToSchedule(Schedule* schedule) {
+ BasicBlock* block = schedule->NewBasicBlock();
+ block->set_rpo_number(static_cast<int32_t>(schedule->rpo_order()->size()));
+ schedule->rpo_order()->push_back(block);
+ return block;
+}
+
+} // namespace
+
+TEST_F(EffectControlLinearizerTest, SimpleLoad) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* heap_number = NumberConstant(0.5);
+ Node* load = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
+ graph()->start(), graph()->start());
+ Node* ret = graph()->NewNode(common()->Return(), load, graph()->start(),
+ graph()->start());
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, heap_number);
+ schedule.AddNode(start, load);
+ schedule.AddReturn(start, ret);
+
+ // Run the state effect introducer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ EXPECT_THAT(load,
+ IsLoadField(AccessBuilder::ForHeapNumberValue(), heap_number,
+ graph()->start(), graph()->start()));
+ // The return should have reconnected effect edge to the load.
+ EXPECT_THAT(ret, IsReturn(load, load, graph()->start()));
+}
+
+TEST_F(EffectControlLinearizerTest, DiamondLoad) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* branch =
+ graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* heap_number = NumberConstant(0.5);
+ Node* vtrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
+ graph()->start(), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = Float64Constant(2);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
+
+ Node* ret =
+ graph()->NewNode(common()->Return(), phi, graph()->start(), merge);
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* tblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+ BasicBlock* mblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ schedule.AddNode(tblock, if_true);
+ schedule.AddNode(tblock, heap_number);
+ schedule.AddNode(tblock, vtrue);
+ schedule.AddGoto(tblock, mblock);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddNode(fblock, vfalse);
+ schedule.AddGoto(fblock, mblock);
+
+ schedule.AddNode(mblock, merge);
+ schedule.AddNode(mblock, phi);
+ schedule.AddReturn(mblock, ret);
+
+ // Run the state effect introducer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ // The effect input to the return should be an effect phi with the
+ // newly introduced effectful change operators.
+ ASSERT_THAT(
+ ret, IsReturn(phi, IsEffectPhi(vtrue, graph()->start(), merge), merge));
+}
+
+TEST_F(EffectControlLinearizerTest, FloatingDiamondsControlWiring) {
+ Schedule schedule(zone());
+
+ // Create the graph and schedule. Roughly (omitting effects and unimportant
+ // nodes):
+ //
+ // BLOCK 0:
+ // r1: Start
+ // c1: Call
+ // b1: Branch(const0, s1)
+ // |
+ // +-------+------+
+ // | |
+ // BLOCK 1: BLOCK 2:
+ // t1: IfTrue(b1) f1: IfFalse(b1)
+ // | |
+ // +-------+------+
+ // |
+ // BLOCK 3:
+ // m1: Merge(t1, f1)
+ // c2: IfSuccess(c1)
+ // b2: Branch(const0 , s1)
+ // |
+ // +-------+------+
+ // | |
+ // BLOCK 4: BLOCK 5:
+ // t2: IfTrue(b2) f2:IfFalse(b2)
+ // | |
+ // +-------+------+
+ // |
+ // BLOCK 6:
+ // m2: Merge(t2, f2)
+ // r1: Return(c1, c2)
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* const0 = Int32Constant(0);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+
+ // First Floating diamond.
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(), const0, graph()->start());
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge1 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+
+ // Second floating diamond.
+ Node* branch2 =
+ graph()->NewNode(common()->Branch(), const0, graph()->start());
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* merge2 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+
+ Node* ret =
+ graph()->NewNode(common()->Return(), call, graph()->start(), if_success);
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* t1block = AddBlockToSchedule(&schedule);
+ BasicBlock* f1block = AddBlockToSchedule(&schedule);
+ BasicBlock* m1block = AddBlockToSchedule(&schedule);
+
+ BasicBlock* t2block = AddBlockToSchedule(&schedule);
+ BasicBlock* f2block = AddBlockToSchedule(&schedule);
+ BasicBlock* m2block = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, p0);
+ schedule.AddNode(start, p1);
+ schedule.AddNode(start, const0);
+ schedule.AddNode(start, call);
+ schedule.AddBranch(start, branch1, t1block, f1block);
+
+ schedule.AddNode(t1block, if_true1);
+ schedule.AddGoto(t1block, m1block);
+
+ schedule.AddNode(f1block, if_false1);
+ schedule.AddGoto(f1block, m1block);
+
+ schedule.AddNode(m1block, merge1);
+ // The scheduler does not always put the IfSuccess node to the corresponding
+ // call's block, simulate that here.
+ schedule.AddNode(m1block, if_success);
+ schedule.AddBranch(m1block, branch2, t2block, f2block);
+
+ schedule.AddNode(t2block, if_true2);
+ schedule.AddGoto(t2block, m2block);
+
+ schedule.AddNode(f2block, if_false2);
+ schedule.AddGoto(f2block, m2block);
+
+ schedule.AddNode(m2block, merge2);
+ schedule.AddReturn(m2block, ret);
+
+ // Run the state effect introducer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ // The effect input to the return should be an effect phi with the
+ // newly introduced effectful change operators.
+ ASSERT_THAT(ret, IsReturn(call, call, merge2));
+ ASSERT_THAT(branch2, IsBranch(const0, merge1));
+ ASSERT_THAT(branch1, IsBranch(const0, if_success));
+ ASSERT_THAT(if_success, IsIfSuccess(call));
+}
+
+TEST_F(EffectControlLinearizerTest, LoopLoad) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* loop = graph()->NewNode(common()->Loop(1), graph()->start());
+ Node* effect_phi =
+ graph()->NewNode(common()->EffectPhi(1), graph()->start(), loop);
+
+ Node* cond = Int32Constant(0);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ loop->AppendInput(zone(), if_false);
+ NodeProperties::ChangeOp(loop, common()->Loop(2));
+
+ effect_phi->InsertInput(zone(), 1, effect_phi);
+ NodeProperties::ChangeOp(effect_phi, common()->EffectPhi(2));
+
+ Node* heap_number = NumberConstant(0.5);
+ Node* load = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
+ graph()->start(), loop);
+
+ Node* ret = graph()->NewNode(common()->Return(), load, effect_phi, if_true);
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* lblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+ BasicBlock* rblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddGoto(start, lblock);
+
+ schedule.AddNode(lblock, loop);
+ schedule.AddNode(lblock, effect_phi);
+ schedule.AddNode(lblock, heap_number);
+ schedule.AddNode(lblock, load);
+ schedule.AddNode(lblock, cond);
+ schedule.AddBranch(lblock, branch, rblock, fblock);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddGoto(fblock, lblock);
+
+ schedule.AddNode(rblock, if_true);
+ schedule.AddReturn(rblock, ret);
+
+ // Run the state effect introducer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ ASSERT_THAT(ret, IsReturn(load, load, if_true));
+ EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(),
+ heap_number, effect_phi, loop));
+}
+
+TEST_F(EffectControlLinearizerTest, CloneBranch) {
+ Schedule schedule(zone());
+
+ Node* cond0 = Parameter(0);
+ Node* cond1 = Parameter(1);
+ Node* cond2 = Parameter(2);
+ Node* branch0 = graph()->NewNode(common()->Branch(), cond0, start());
+ Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
+ Node* phi0 = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2),
+ cond1, cond2, merge0);
+ Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
+
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* f1block = AddBlockToSchedule(&schedule);
+ BasicBlock* t1block = AddBlockToSchedule(&schedule);
+ BasicBlock* bblock = AddBlockToSchedule(&schedule);
+
+ BasicBlock* f2block = AddBlockToSchedule(&schedule);
+ BasicBlock* t2block = AddBlockToSchedule(&schedule);
+ BasicBlock* mblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+
+ schedule.AddBranch(start, branch0, t1block, f1block);
+
+ schedule.AddNode(t1block, control1);
+ schedule.AddGoto(t1block, bblock);
+
+ schedule.AddNode(f1block, control2);
+ schedule.AddGoto(f1block, bblock);
+
+ schedule.AddNode(bblock, merge0);
+ schedule.AddNode(bblock, phi0);
+ schedule.AddBranch(bblock, branch, t2block, f2block);
+
+ schedule.AddNode(t2block, if_true);
+ schedule.AddGoto(t2block, mblock);
+
+ schedule.AddNode(f2block, if_false);
+ schedule.AddGoto(f2block, mblock);
+
+ schedule.AddNode(mblock, merge);
+ schedule.AddNode(mblock, graph()->end());
+
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ Capture<Node *> branch1_capture, branch2_capture;
+ EXPECT_THAT(
+ end(),
+ IsEnd(IsMerge(IsMerge(IsIfTrue(CaptureEq(&branch1_capture)),
+ IsIfTrue(CaptureEq(&branch2_capture))),
+ IsMerge(IsIfFalse(AllOf(CaptureEq(&branch1_capture),
+ IsBranch(cond1, control1))),
+ IsIfFalse(AllOf(CaptureEq(&branch2_capture),
+ IsBranch(cond2, control2)))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
index d5e12ba0db..990b813947 100644
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -48,7 +48,8 @@ class EscapeAnalysisTest : public GraphTest {
effect = effect_;
}
- return effect_ = graph()->NewNode(common()->BeginRegion(), effect);
+ return effect_ = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable), effect);
}
Node* FinishRegion(Node* value, Node* effect = nullptr) {
@@ -146,14 +147,18 @@ class EscapeAnalysisTest : public GraphTest {
}
FieldAccess FieldAccessAtIndex(int offset) {
- FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
ElementAccess MakeElementAccess(int header_size) {
ElementAccess access = {kTaggedBase, header_size, Type::Any(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
@@ -441,8 +446,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
nullptr),
state_values1, state_values2, state_values3, UndefinedConstant(),
graph()->start(), graph()->start());
- Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect1, ifFalse);
+ Node* deopt = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
@@ -481,8 +487,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
nullptr),
state_values1, state_values2, state_values3, UndefinedConstant(),
graph()->start(), graph()->start());
- Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect1, ifFalse);
+ Node* deopt = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index 8d05c526c3..7d94793459 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -283,8 +283,6 @@ const Operator kMockOpEffect(IrOpcode::kDead, Operator::kNoProperties,
const Operator kMockOpControl(IrOpcode::kDead, Operator::kNoProperties,
"MockOpControl", 0, 0, 1, 1, 0, 1);
-const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
-
} // namespace
@@ -348,7 +346,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse2) {
Node* dead = graph()->NewNode(&kMockOperator);
Node* node = graph()->NewNode(&kMockOpControl, start);
Node* success = graph()->NewNode(common.IfSuccess(), node);
- Node* exception = graph()->NewNode(common.IfException(kNoHint), effect, node);
+ Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), dead);
@@ -372,7 +370,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse3) {
Node* dead = graph()->NewNode(&kMockOperator);
Node* node = graph()->NewNode(&kMockOpControl, start);
Node* success = graph()->NewNode(common.IfSuccess(), node);
- Node* exception = graph()->NewNode(common.IfException(kNoHint), effect, node);
+ Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), dead);
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 31bae6d415..d4248e422b 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -48,6 +48,9 @@ class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
Node* EmptyFrameState();
+ Matcher<Node*> IsBooleanConstant(bool value) {
+ return value ? IsTrueConstant() : IsFalseConstant();
+ }
Matcher<Node*> IsFalseConstant();
Matcher<Node*> IsTrueConstant();
Matcher<Node*> IsUndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 5280f69aa0..f2c9c2609b 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -751,71 +751,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
-
-TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
// -----------------------------------------------------------------------------
// Miscellaneous.
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 16030f80d7..d2953159fc 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -45,9 +45,8 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
selector.SelectInstructions();
if (FLAG_trace_turbo) {
OFStream out(stdout);
- PrintableInstructionSequence printable = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- &sequence};
+ PrintableInstructionSequence printable = {RegisterConfiguration::Turbofan(),
+ &sequence};
out << "=== Code sequence after instruction selection ===" << std::endl
<< printable;
}
@@ -94,18 +93,18 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
}
for (auto i : s.virtual_registers_) {
int const virtual_register = i.second;
- if (sequence.IsFloat(virtual_register)) {
+ if (sequence.IsFP(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
}
if (sequence.IsReference(virtual_register)) {
- EXPECT_FALSE(sequence.IsFloat(virtual_register));
+ EXPECT_FALSE(sequence.IsFP(virtual_register));
s.references_.insert(virtual_register);
}
}
- for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
- s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
- InstructionSequence::StateId::FromInt(i)));
+ for (int i = 0; i < sequence.GetDeoptimizationEntryCount(); i++) {
+ s.deoptimization_entries_.push_back(
+ sequence.GetDeoptimizationEntry(i).descriptor());
}
return s;
}
@@ -199,11 +198,9 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
// -----------------------------------------------------------------------------
// Conversions.
-
-TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
+TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToWord32WithParameter) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
- m.Return(
- m.TruncateFloat64ToInt32(TruncationMode::kJavaScript, m.Parameter(0)));
+ m.Return(m.TruncateFloat64ToWord32(m.Parameter(0)));
Stream s = m.Build(kAllInstructions);
ASSERT_EQ(4U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
@@ -335,7 +332,8 @@ TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
Node* p2 = m2.Parameter(0);
m2.Return(m2.AddNode(
m2.machine()->Load(MachineType::Int32()), p2, m2.Int32Constant(0),
- m2.AddNode(m2.common()->BeginRegion(), m2.graph()->start())));
+ m2.AddNode(m2.common()->BeginRegion(RegionObservability::kObservable),
+ m2.graph()->start())));
Stream s2 = m2.Build(kAllInstructions);
EXPECT_LE(3U, s1.size());
ASSERT_EQ(s1.size(), s2.size());
@@ -482,7 +480,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(4))); // This should be a context.
// We inserted 0 here.
EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
- EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined());
+ EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined(isolate()));
EXPECT_EQ(MachineType::AnyTagged(),
desc_before->GetType(0)); // function is always
// tagged/any.
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index f1397faa06..93cef0544e 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -137,13 +137,15 @@ class InstructionSelectorTest : public TestWithContext,
// Add return location(s).
const int return_count = static_cast<int>(msig->return_count());
for (int i = 0; i < return_count; i++) {
- locations.AddReturn(LinkageLocation::ForCallerFrameSlot(-1 - i));
+ locations.AddReturn(
+ LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetReturn(i)));
}
// Just put all parameters on the stack.
const int parameter_count = static_cast<int>(msig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
- locations.AddParam(LinkageLocation::ForCallerFrameSlot(-1 - i));
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetParam(i)));
}
const RegList kCalleeSaveRegisters = 0;
@@ -155,7 +157,6 @@ class InstructionSelectorTest : public TestWithContext,
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
target_loc, // target location
- msig, // machine_sig
locations.Build(), // location_sig
0, // stack_parameter_count
Operator::kNoProperties, // properties
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index 51112a6470..a0a86e043a 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -15,10 +15,9 @@ namespace compiler {
static const char*
general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
static const char*
- double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
+ double_register_names_[RegisterConfiguration::kMaxFPRegisters];
static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
- RegisterConfiguration::kMaxDoubleRegisters)];
-
+ RegisterConfiguration::kMaxFPRegisters)];
namespace {
static int allocatable_codes[InstructionSequenceTest::kDefaultNRegs] = {
@@ -35,7 +34,7 @@ static void InitializeRegisterNames() {
loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
*loc++ = 0;
}
- for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
+ for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
double_register_names_[i] = loc;
loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
*loc++ = 0;
@@ -56,7 +55,7 @@ InstructionSequenceTest::InstructionSequenceTest()
void InstructionSequenceTest::SetNumRegs(int num_general_registers,
int num_double_registers) {
- CHECK(config_.is_empty());
+ CHECK(!config_);
CHECK(instructions_.empty());
CHECK(instruction_blocks_.empty());
num_general_registers_ = num_general_registers;
@@ -65,12 +64,17 @@ void InstructionSequenceTest::SetNumRegs(int num_general_registers,
RegisterConfiguration* InstructionSequenceTest::config() {
- if (config_.is_empty()) {
- config_.Reset(new RegisterConfiguration(
+ if (!config_) {
+ config_.reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
num_double_registers_, num_double_registers_, allocatable_codes,
- allocatable_double_codes, general_register_names_,
- double_register_names_));
+ allocatable_double_codes,
+ kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
+ : RegisterConfiguration::COMBINE,
+ general_register_names_,
+ double_register_names_, // float register names
+ double_register_names_,
+ double_register_names_)); // SIMD 128 register names
}
return config_.get();
}
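
The config_ holder in this file moves from base::SmartPointer to std::unique_ptr, so the emptiness check becomes a plain boolean test and Reset() becomes reset(). A minimal sketch of the idiom outside V8 (the struct name is illustrative):

    #include <memory>

    struct RegisterConfigurationLike {
      int num_general_registers = 0;
    };

    int main() {
      std::unique_ptr<RegisterConfigurationLike> config;
      // SmartPointer style: config.is_empty()  ->  unique_ptr style: !config.
      if (!config) {
        // SmartPointer style: config.Reset(new ...)  ->  config.reset(new ...).
        config.reset(new RegisterConfigurationLike{16});
      }
      return config->num_general_registers == 16 ? 0 : 1;
    }
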
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
index eb86bd9174..956f5d55b9 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -5,6 +5,8 @@
#ifndef V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
#define V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
+#include <memory>
+
#include "src/compiler/instruction.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -229,7 +231,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
typedef std::map<int, const Instruction*> Instructions;
typedef std::vector<BlockCompletion> Completions;
- base::SmartPointer<RegisterConfiguration> config_;
+ std::unique_ptr<RegisterConfiguration> config_;
InstructionSequence* sequence_;
int num_general_registers_;
int num_double_registers_;
@@ -241,6 +243,8 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
LoopBlocks loop_blocks_;
InstructionBlock* current_block_;
bool block_returns_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionSequenceTest);
};
} // namespace compiler
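
The header also gains DISALLOW_COPY_AND_ASSIGN, making the InstructionSequenceTest fixture non-copyable. In plain C++, without V8's macro, the same effect is roughly:

    class NonCopyableFixture {
     public:
      NonCopyableFixture() = default;

      // What DISALLOW_COPY_AND_ASSIGN amounts to: deleted copy operations.
      NonCopyableFixture(const NonCopyableFixture&) = delete;
      NonCopyableFixture& operator=(const NonCopyableFixture&) = delete;
    };
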
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 08f3038754..06ac524111 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -123,72 +123,122 @@ TEST_F(Int64LoweringTest, Int64Constant) {
IsInt32Constant(high_word_value(0)), start(), start()));
}
-TEST_F(Int64LoweringTest, Int64Load) {
- int32_t base = 0x1234;
- int32_t index = 0x5678;
-
- LowerGraph(graph()->NewNode(machine()->Load(MachineType::Int64()),
- Int32Constant(base), Int32Constant(index),
- start(), start()),
- MachineRepresentation::kWord64);
-
- Capture<Node*> high_word_load;
- Matcher<Node*> high_word_load_matcher =
- IsLoad(MachineType::Int32(), IsInt32Constant(base),
- IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), start(),
- start());
-
- EXPECT_THAT(
- graph()->end()->InputAt(1),
- IsReturn2(IsLoad(MachineType::Int32(), IsInt32Constant(base),
- IsInt32Constant(index), AllOf(CaptureEq(&high_word_load),
- high_word_load_matcher),
- start()),
- AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
- start(), start()));
-}
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define LOAD_VERIFY(kLoad) \
+ Matcher<Node*> high_word_load_matcher = \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), \
+ start(), start()); \
+ \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn2( \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Constant(index), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), \
+ start()), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), start(), \
+ start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+#define LOAD_VERIFY(kLoad) \
+ Matcher<Node*> high_word_load_matcher = \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Constant(index), start(), start()); \
+ \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn2( \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), \
+ start()), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), start(), \
+ start()));
+#endif
+
+#define INT64_LOAD_LOWERING(kLoad) \
+ int32_t base = 0x1234; \
+ int32_t index = 0x5678; \
+ \
+ LowerGraph(graph()->NewNode(machine()->kLoad(MachineType::Int64()), \
+ Int32Constant(base), Int32Constant(index), \
+ start(), start()), \
+ MachineRepresentation::kWord64); \
+ \
+ Capture<Node*> high_word_load; \
+ LOAD_VERIFY(kLoad)
+
+TEST_F(Int64LoweringTest, Int64Load) { INT64_LOAD_LOWERING(Load); }
+
+TEST_F(Int64LoweringTest, UnalignedInt64Load) {
+ INT64_LOAD_LOWERING(UnalignedLoad);
+}
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define STORE_VERIFY(kStore, kRep) \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn(IsInt32Constant(return_value), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), IsInt32Constant(index), \
+ IsInt32Constant(low_word_value(0)), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)), \
+ IsInt32Constant(high_word_value(0)), start(), start()), \
+ start()), \
+ start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+#define STORE_VERIFY(kStore, kRep) \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn(IsInt32Constant(return_value), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)), \
+ IsInt32Constant(low_word_value(0)), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), IsInt32Constant(index), \
+ IsInt32Constant(high_word_value(0)), start(), start()), \
+ start()), \
+ start()));
+#endif
+
+#define INT64_STORE_LOWERING(kStore, kRep32, kRep64) \
+ int32_t base = 1111; \
+ int32_t index = 2222; \
+ int32_t return_value = 0x5555; \
+ \
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \
+ sig_builder.AddReturn(MachineRepresentation::kWord32); \
+ \
+ Node* store = graph()->NewNode(machine()->kStore(kRep64), \
+ Int32Constant(base), Int32Constant(index), \
+ Int64Constant(value(0)), start(), start()); \
+ \
+ Node* ret = graph()->NewNode(common()->Return(), \
+ Int32Constant(return_value), store, start()); \
+ \
+ NodeProperties::MergeControlToEnd(graph(), common(), ret); \
+ \
+ Int64Lowering lowering(graph(), machine(), common(), zone(), \
+ sig_builder.Build()); \
+ lowering.LowerGraph(); \
+ \
+ STORE_VERIFY(kStore, kRep32)
TEST_F(Int64LoweringTest, Int64Store) {
- // We have to build the TF graph explicitly here because Store does not return
- // a value.
-
- int32_t base = 1111;
- int32_t index = 2222;
- int32_t return_value = 0x5555;
-
- Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
- sig_builder.AddReturn(MachineRepresentation::kWord32);
-
- Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord64,
- WriteBarrierKind::kNoWriteBarrier)),
- Int32Constant(base), Int32Constant(index), Int64Constant(value(0)),
- start(), start());
-
- Node* ret = graph()->NewNode(common()->Return(), Int32Constant(return_value),
- store, start());
-
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
-
- Int64Lowering lowering(graph(), machine(), common(), zone(),
- sig_builder.Build());
- lowering.LowerGraph();
-
- const StoreRepresentation rep(MachineRepresentation::kWord32,
- kNoWriteBarrier);
+ const StoreRepresentation rep64(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier);
+ const StoreRepresentation rep32(MachineRepresentation::kWord32,
+ WriteBarrierKind::kNoWriteBarrier);
+ INT64_STORE_LOWERING(Store, rep32, rep64);
+}
- EXPECT_THAT(
- graph()->end()->InputAt(1),
- IsReturn(
- IsInt32Constant(return_value),
- IsStore(
- rep, IsInt32Constant(base), IsInt32Constant(index),
- IsInt32Constant(low_word_value(0)),
- IsStore(rep, IsInt32Constant(base),
- IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)),
- IsInt32Constant(high_word_value(0)), start(), start()),
- start()),
- start()));
+TEST_F(Int64LoweringTest, Int64UnalignedStore) {
+ const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64);
+ const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32);
+ INT64_STORE_LOWERING(UnalignedStore, rep32, rep64);
}
TEST_F(Int64LoweringTest, Int64And) {
@@ -301,9 +351,6 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
}
-// todo(ahaas): I added a list of missing instructions here to make merging
-// easier when I do them one by one.
-// kExprI64Add:
TEST_F(Int64LoweringTest, Int64Add) {
LowerGraph(graph()->NewNode(machine()->Int64Add(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -319,7 +366,7 @@ TEST_F(Int64LoweringTest, Int64Add) {
IsProjection(1, AllOf(CaptureEq(&add), add_matcher)),
start(), start()));
}
-// kExprI64Sub:
+
TEST_F(Int64LoweringTest, Int64Sub) {
LowerGraph(graph()->NewNode(machine()->Int64Sub(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -336,7 +383,6 @@ TEST_F(Int64LoweringTest, Int64Sub) {
start(), start()));
}
-// kExprI64Mul:
TEST_F(Int64LoweringTest, Int64Mul) {
LowerGraph(graph()->NewNode(machine()->Int64Mul(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -354,11 +400,6 @@ TEST_F(Int64LoweringTest, Int64Mul) {
start(), start()));
}
-// kExprI64DivS:
-// kExprI64DivU:
-// kExprI64RemS:
-// kExprI64RemU:
-// kExprI64Ior:
TEST_F(Int64LoweringTest, Int64Ior) {
LowerGraph(graph()->NewNode(machine()->Word64Or(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -371,7 +412,6 @@ TEST_F(Int64LoweringTest, Int64Ior) {
start(), start()));
}
-// kExprI64Xor:
TEST_F(Int64LoweringTest, Int64Xor) {
LowerGraph(graph()->NewNode(machine()->Word64Xor(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -383,7 +423,7 @@ TEST_F(Int64LoweringTest, Int64Xor) {
IsInt32Constant(high_word_value(1))),
start(), start()));
}
-// kExprI64Shl:
+
TEST_F(Int64LoweringTest, Int64Shl) {
LowerGraph(graph()->NewNode(machine()->Word64Shl(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -399,7 +439,7 @@ TEST_F(Int64LoweringTest, Int64Shl) {
IsProjection(1, AllOf(CaptureEq(&shl), shl_matcher)),
start(), start()));
}
-// kExprI64ShrU:
+
TEST_F(Int64LoweringTest, Int64ShrU) {
LowerGraph(graph()->NewNode(machine()->Word64Shr(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -415,7 +455,7 @@ TEST_F(Int64LoweringTest, Int64ShrU) {
IsProjection(1, AllOf(CaptureEq(&shr), shr_matcher)),
start(), start()));
}
-// kExprI64ShrS:
+
TEST_F(Int64LoweringTest, Int64ShrS) {
LowerGraph(graph()->NewNode(machine()->Word64Sar(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -431,7 +471,7 @@ TEST_F(Int64LoweringTest, Int64ShrS) {
IsProjection(1, AllOf(CaptureEq(&sar), sar_matcher)),
start(), start()));
}
-// kExprI64Eq:
+
TEST_F(Int64LoweringTest, Int64Eq) {
LowerGraph(graph()->NewNode(machine()->Word64Equal(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -447,27 +487,25 @@ TEST_F(Int64LoweringTest, Int64Eq) {
start(), start()));
}
-// kExprI64LtS:
TEST_F(Int64LoweringTest, Int64LtS) {
TestComparison(machine()->Int64LessThan(), IsInt32LessThan, IsUint32LessThan);
}
-// kExprI64LeS:
+
TEST_F(Int64LoweringTest, Int64LeS) {
TestComparison(machine()->Int64LessThanOrEqual(), IsInt32LessThan,
IsUint32LessThanOrEqual);
}
-// kExprI64LtU:
+
TEST_F(Int64LoweringTest, Int64LtU) {
TestComparison(machine()->Uint64LessThan(), IsUint32LessThan,
IsUint32LessThan);
}
-// kExprI64LeU:
+
TEST_F(Int64LoweringTest, Int64LeU) {
TestComparison(machine()->Uint64LessThanOrEqual(), IsUint32LessThan,
IsUint32LessThanOrEqual);
}
-// kExprI32ConvertI64:
TEST_F(Int64LoweringTest, I32ConvertI64) {
LowerGraph(graph()->NewNode(machine()->TruncateInt64ToInt32(),
Int64Constant(value(0))),
@@ -475,7 +513,7 @@ TEST_F(Int64LoweringTest, I32ConvertI64) {
EXPECT_THAT(graph()->end()->InputAt(1),
IsReturn(IsInt32Constant(low_word_value(0)), start(), start()));
}
-// kExprI64SConvertI32:
+
TEST_F(Int64LoweringTest, I64SConvertI32) {
LowerGraph(graph()->NewNode(machine()->ChangeInt32ToInt64(),
Int32Constant(low_word_value(0))),
@@ -501,7 +539,7 @@ TEST_F(Int64LoweringTest, I64SConvertI32_2) {
IsInt32Constant(31)),
start(), start()));
}
-// kExprI64UConvertI32:
+
TEST_F(Int64LoweringTest, I64UConvertI32) {
LowerGraph(graph()->NewNode(machine()->ChangeUint32ToUint64(),
Int32Constant(low_word_value(0))),
@@ -523,7 +561,7 @@ TEST_F(Int64LoweringTest, I64UConvertI32_2) {
IsReturn2(IsInt32Constant(low_word_value(0)), IsInt32Constant(0),
start(), start()));
}
-// kExprF64ReinterpretI64:
+
TEST_F(Int64LoweringTest, F64ReinterpretI64) {
LowerGraph(graph()->NewNode(machine()->BitcastInt64ToFloat64(),
Int64Constant(value(0))),
@@ -538,12 +576,13 @@ TEST_F(Int64LoweringTest, F64ReinterpretI64) {
IsStore(StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier),
AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
- IsInt32Constant(0), IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(Int64Lowering::kLowerWordOffset),
+ IsInt32Constant(low_word_value(0)),
IsStore(StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier),
AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
- IsInt32Constant(4), IsInt32Constant(high_word_value(0)),
- start(), start()),
+ IsInt32Constant(Int64Lowering::kHigherWordOffset),
+ IsInt32Constant(high_word_value(0)), start(), start()),
start());
EXPECT_THAT(
@@ -554,7 +593,7 @@ TEST_F(Int64LoweringTest, F64ReinterpretI64) {
AllOf(CaptureEq(&store_capture), store_matcher), start()),
start(), start()));
}
-// kExprI64ReinterpretF64:
+
TEST_F(Int64LoweringTest, I64ReinterpretF64) {
LowerGraph(graph()->NewNode(machine()->BitcastFloat64ToInt64(),
Float64Constant(bit_cast<double>(value(0)))),
@@ -575,15 +614,15 @@ TEST_F(Int64LoweringTest, I64ReinterpretF64) {
graph()->end()->InputAt(1),
IsReturn2(IsLoad(MachineType::Int32(),
AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
- IsInt32Constant(0),
+ IsInt32Constant(Int64Lowering::kLowerWordOffset),
AllOf(CaptureEq(&store), store_matcher), start()),
IsLoad(MachineType::Int32(),
AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
- IsInt32Constant(0x4),
+ IsInt32Constant(Int64Lowering::kHigherWordOffset),
AllOf(CaptureEq(&store), store_matcher), start()),
start(), start()));
}
-// kExprI64Clz:
+
TEST_F(Int64LoweringTest, I64Clz) {
LowerGraph(graph()->NewNode(machine()->Word64Clz(), Int64Constant(value(0))),
MachineRepresentation::kWord64);
@@ -606,9 +645,9 @@ TEST_F(Int64LoweringTest, I64Clz) {
AllOf(CaptureEq(&branch_capture), branch_matcher)))),
IsInt32Constant(0), start(), start()));
}
-// kExprI64Ctz:
+
TEST_F(Int64LoweringTest, I64Ctz) {
- LowerGraph(graph()->NewNode(machine()->Word64CtzPlaceholder(),
+ LowerGraph(graph()->NewNode(machine()->Word64Ctz().placeholder(),
Int64Constant(value(0))),
MachineRepresentation::kWord64);
Capture<Node*> branch_capture;
@@ -628,7 +667,6 @@ TEST_F(Int64LoweringTest, I64Ctz) {
AllOf(CaptureEq(&branch_capture), branch_matcher)))),
IsInt32Constant(0), start(), start()));
}
-// kExprI64Popcnt:
TEST_F(Int64LoweringTest, Dfs) {
Node* common = Int64Constant(value(0));
@@ -649,7 +687,7 @@ TEST_F(Int64LoweringTest, Dfs) {
}
TEST_F(Int64LoweringTest, I64Popcnt) {
- LowerGraph(graph()->NewNode(machine()->Word64PopcntPlaceholder(),
+ LowerGraph(graph()->NewNode(machine()->Word64Popcnt().placeholder(),
Int64Constant(value(0))),
MachineRepresentation::kWord64);
@@ -800,6 +838,17 @@ TEST_F(Int64LoweringTest, I64PhiWord32) {
TestPhi(this, MachineRepresentation::kWord32, Float32Constant(1),
Float32Constant(2));
}
+
+TEST_F(Int64LoweringTest, I64ReverseBytes) {
+ LowerGraph(graph()->NewNode(machine()->Word64ReverseBytes().placeholder(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kWord64);
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsWord32ReverseBytes(IsInt32Constant(high_word_value(0))),
+ IsWord32ReverseBytes(IsInt32Constant(low_word_value(0))),
+ start(), start()));
+}
} // namespace compiler
} // namespace internal
} // namespace v8
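
The new LOAD_VERIFY/STORE_VERIFY macros above encode the layout these lowering tests rely on: a 64-bit value is handled as two 32-bit words, with the low word at offset 0 and the high word at offset 4 on little-endian targets (Int64Lowering::kLowerWordOffset / kHigherWordOffset), and the two offsets swapped on big-endian targets. A standalone sketch of that split, independent of V8:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint64_t value = 0x1122334455667788ULL;
      const uint32_t low = static_cast<uint32_t>(value);         // 0x55667788
      const uint32_t high = static_cast<uint32_t>(value >> 32);  // 0x11223344

      unsigned char bytes[8];
      std::memcpy(bytes, &value, sizeof(value));

      uint32_t word_at_0, word_at_4;
      std::memcpy(&word_at_0, bytes + 0, 4);
      std::memcpy(&word_at_4, bytes + 4, 4);

      // On a little-endian host the low word sits at offset 0 and the high
      // word at offset 4; on a big-endian host the two offsets are swapped.
      assert((word_at_0 == low && word_at_4 == high) ||
             (word_at_0 == high && word_at_4 == low));
      return 0;
    }
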
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 0f8eed7958..ed20e64194 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -33,7 +33,8 @@ class JSBuiltinReducerTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSBuiltinReducer reducer(&graph_reducer, &jsgraph);
+ JSBuiltinReducer reducer(&graph_reducer, &jsgraph,
+ JSBuiltinReducer::kNoFlags, nullptr);
return reducer.Reduce(node);
}
@@ -49,6 +50,32 @@ class JSBuiltinReducerTest : public TypedGraphTest {
return HeapConstant(f);
}
+ Node* NumberFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("Number"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
+ Node* StringFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("String"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -74,28 +101,49 @@ Type* const kNumberTypes[] = {
// -----------------------------------------------------------------------------
-// Math.max
+// Math.abs
+TEST_F(JSBuiltinReducerTest, MathAbsWithNumber) {
+ Node* function = MathFunction("abs");
-TEST_F(JSBuiltinReducerTest, MathMax0) {
- Node* function = MathFunction("max");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAbs(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAbsWithPlainPrimitive) {
+ Node* function = MathFunction("abs");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
- UndefinedConstant(), context, frame_state,
- frame_state, effect, control);
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+ EXPECT_THAT(r.replacement(), IsNumberAbs(IsPlainPrimitiveToNumber(p0)));
}
+// -----------------------------------------------------------------------------
+// Math.acos
-TEST_F(JSBuiltinReducerTest, MathMax1) {
- Node* function = MathFunction("max");
+TEST_F(JSBuiltinReducerTest, MathAcosWithNumber) {
+ Node* function = MathFunction("acos");
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -105,73 +153,1094 @@ TEST_F(JSBuiltinReducerTest, MathMax1) {
Node* p0 = Parameter(t0, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
UndefinedConstant(), p0, context, frame_state,
- frame_state, effect, control);
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
+ EXPECT_THAT(r.replacement(), IsNumberAcos(p0));
}
}
+TEST_F(JSBuiltinReducerTest, MathAcosWithPlainPrimitive) {
+ Node* function = MathFunction("acos");
-TEST_F(JSBuiltinReducerTest, MathMax2) {
- Node* function = MathFunction("max");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAcos(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.acosh
+
+TEST_F(JSBuiltinReducerTest, MathAcoshWithNumber) {
+ Node* function = MathFunction("acosh");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kIntegral32Types) {
- TRACED_FOREACH(Type*, t1, kIntegral32Types) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAcosh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAcoshWithPlainPrimitive) {
+ Node* function = MathFunction("acosh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAcosh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.asin
+
+TEST_F(JSBuiltinReducerTest, MathAsinWithNumber) {
+ Node* function = MathFunction("asin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsin(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAsinWithPlainPrimitive) {
+ Node* function = MathFunction("asin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsin(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.asinh
+
+TEST_F(JSBuiltinReducerTest, MathAsinhWithNumber) {
+ Node* function = MathFunction("asinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsinh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAsinhWithPlainPrimitive) {
+ Node* function = MathFunction("asinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsinh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atan
+
+TEST_F(JSBuiltinReducerTest, MathAtanWithNumber) {
+ Node* function = MathFunction("atan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAtanWithPlainPrimitive) {
+ Node* function = MathFunction("atan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atanh
+
+TEST_F(JSBuiltinReducerTest, MathAtanhWithNumber) {
+ Node* function = MathFunction("atanh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtanh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAtanhWithPlainPrimitive) {
+ Node* function = MathFunction("atanh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtanh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atan2
+
+TEST_F(JSBuiltinReducerTest, MathAtan2WithNumber) {
+ Node* function = MathFunction("atan2");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p1 = Parameter(t1, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
+ frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kNone,
- IsNumberLessThan(p1, p0), p0, p1));
+ EXPECT_THAT(r.replacement(), IsNumberAtan2(p0, p1));
}
}
}
+TEST_F(JSBuiltinReducerTest, MathAtan2WithPlainPrimitive) {
+ Node* function = MathFunction("atan2");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan2(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.ceil
+
+TEST_F(JSBuiltinReducerTest, MathCeilWithNumber) {
+ Node* function = MathFunction("ceil");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCeil(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCeilWithPlainPrimitive) {
+ Node* function = MathFunction("ceil");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCeil(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.clz32
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithUnsigned32) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberClz32(p0));
+}
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithNumber) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Number(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberClz32(IsNumberToUint32(p0)));
+}
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithPlainPrimitive) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberClz32(IsNumberToUint32(IsPlainPrimitiveToNumber(p0))));
+}
+
+// -----------------------------------------------------------------------------
+// Math.cos
+
+TEST_F(JSBuiltinReducerTest, MathCosWithNumber) {
+ Node* function = MathFunction("cos");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCos(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCosWithPlainPrimitive) {
+ Node* function = MathFunction("cos");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCos(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.cosh
+
+TEST_F(JSBuiltinReducerTest, MathCoshWithNumber) {
+ Node* function = MathFunction("cosh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCosh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCoshWithPlainPrimitive) {
+ Node* function = MathFunction("cosh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCosh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.exp
+
+TEST_F(JSBuiltinReducerTest, MathExpWithNumber) {
+ Node* function = MathFunction("exp");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberExp(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathExpWithPlainPrimitive) {
+ Node* function = MathFunction("exp");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberExp(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.floor
+
+TEST_F(JSBuiltinReducerTest, MathFloorWithNumber) {
+ Node* function = MathFunction("floor");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFloor(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathFloorWithPlainPrimitive) {
+ Node* function = MathFunction("floor");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFloor(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.fround
+
+TEST_F(JSBuiltinReducerTest, MathFroundWithNumber) {
+ Node* function = MathFunction("fround");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFround(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathFroundWithPlainPrimitive) {
+ Node* function = MathFunction("fround");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFround(IsPlainPrimitiveToNumber(p0)));
+}
// -----------------------------------------------------------------------------
// Math.imul
+TEST_F(JSBuiltinReducerTest, MathImulWithUnsigned32) {
+ Node* function = MathFunction("imul");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* p1 = Parameter(Type::Unsigned32(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberImul(p0, p1));
+}
+
+TEST_F(JSBuiltinReducerTest, MathImulWithNumber) {
+ Node* function = MathFunction("imul");
-TEST_F(JSBuiltinReducerTest, MathImul) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Number(), 0);
+ Node* p1 = Parameter(Type::Number(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
+}
+
+TEST_F(JSBuiltinReducerTest, MathImulWithPlainPrimitive) {
Node* function = MathFunction("imul");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(IsPlainPrimitiveToNumber(p0)),
+ IsNumberToUint32(IsPlainPrimitiveToNumber(p1))));
+}
+
+// -----------------------------------------------------------------------------
+// Math.log
+
+TEST_F(JSBuiltinReducerTest, MathLogWithNumber) {
+ Node* function = MathFunction("log");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathLogWithPlainPrimitive) {
+ Node* function = MathFunction("log");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.log1p
+
+TEST_F(JSBuiltinReducerTest, MathLog1pWithNumber) {
+ Node* function = MathFunction("log1p");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog1p(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathLog1pWithPlainPrimitive) {
+ Node* function = MathFunction("log1p");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog1p(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithNoArguments) {
+ Node* function = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
+ UndefinedConstant(), context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithNumber) {
+ Node* function = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithPlainPrimitive) {
+ Node* function = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberMax(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.min
+
+TEST_F(JSBuiltinReducerTest, MathMinWithNoArguments) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
+ UndefinedConstant(), context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(V8_INFINITY));
+}
+
+TEST_F(JSBuiltinReducerTest, MathMinWithNumber) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathMinWithPlainPrimitive) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberMin(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.round
+
+TEST_F(JSBuiltinReducerTest, MathRoundWithNumber) {
+ Node* function = MathFunction("round");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberRound(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathRoundWithPlainPrimitive) {
+ Node* function = MathFunction("round");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberRound(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.pow
+
+TEST_F(JSBuiltinReducerTest, MathPowWithNumber) {
+ Node* function = MathFunction("pow");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
+ Node* p1 = Parameter(t1, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
+ frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
+ EXPECT_THAT(r.replacement(), IsNumberPow(p0, p1));
}
}
}
+TEST_F(JSBuiltinReducerTest, MathPowWithPlainPrimitive) {
+ Node* function = MathFunction("pow");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberPow(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
// -----------------------------------------------------------------------------
-// Math.fround
+// Math.sign
+TEST_F(JSBuiltinReducerTest, MathSignWithNumber) {
+ Node* function = MathFunction("sign");
-TEST_F(JSBuiltinReducerTest, MathFround) {
- Node* function = MathFunction("fround");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSign(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSignWithPlainPrimitive) {
+ Node* function = MathFunction("sign");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSign(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sin
+
+TEST_F(JSBuiltinReducerTest, MathSinWithNumber) {
+ Node* function = MathFunction("sin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSin(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSinWithPlainPrimitive) {
+ Node* function = MathFunction("sin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSin(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sinh
+
+TEST_F(JSBuiltinReducerTest, MathSinhWithNumber) {
+ Node* function = MathFunction("sinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSinh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSinhWithPlainPrimitive) {
+ Node* function = MathFunction("sinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSinh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+TEST_F(JSBuiltinReducerTest, MathSqrtWithNumber) {
+ Node* function = MathFunction("sqrt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSqrt(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSqrtWithPlainPrimitive) {
+ Node* function = MathFunction("sqrt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSqrt(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.tan
+
+TEST_F(JSBuiltinReducerTest, MathTanWithNumber) {
+ Node* function = MathFunction("tan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTan(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTanWithPlainPrimitive) {
+ Node* function = MathFunction("tan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTan(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.tanh
+
+TEST_F(JSBuiltinReducerTest, MathTanhWithNumber) {
+ Node* function = MathFunction("tanh");
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -181,14 +1250,152 @@ TEST_F(JSBuiltinReducerTest, MathFround) {
Node* p0 = Parameter(t0, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTanh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTanhWithPlainPrimitive) {
+ Node* function = MathFunction("tanh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTanh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.trunc
+
+TEST_F(JSBuiltinReducerTest, MathTruncWithNumber) {
+ Node* function = MathFunction("trunc");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTrunc(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTruncWithPlainPrimitive) {
+ Node* function = MathFunction("trunc");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTrunc(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Number.parseInt
+
+TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32) {
+ Node* function = NumberFunction("parseInt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberToInt32(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32AndUndefined) {
+ Node* function = NumberFunction("parseInt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(Type::Undefined(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ EXPECT_THAT(r.replacement(), IsNumberToInt32(p0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// String.fromCharCode
+
+TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithNumber) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsStringFromCharCode(p0));
}
}
+TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithPlainPrimitive) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStringFromCharCode(IsPlainPrimitiveToNumber(p0)));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
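
All of the Math builtin tests added above share one skeleton: build a JSCallFunction node whose callee is the relevant Math builtin, feed it a typed Parameter, run the JSBuiltinReducer, and match the replacement against the corresponding pure simplified operator, wrapped in PlainPrimitiveToNumber when the argument is only known to be a plain primitive. The sketch below restates that skeleton for Math.cos. It is illustrative only and not part of the commit; it assumes this revision also reduces Math.cos and provides an IsNumberCos matcher (neither appears in this hunk), while the fixture and helpers (JSBuiltinReducerTest, MathFunction, Parameter, Reduce) are the ones used throughout the file being patched.

// Illustrative sketch, not part of the commit: the common shape of the
// Math.* reducer tests above, instantiated for Math.cos.
TEST_F(JSBuiltinReducerTest, MathCosWithPlainPrimitiveSketch) {
  Node* function = MathFunction("cos");  // JSFunction constant for Math.cos

  Node* effect = graph()->start();
  Node* control = graph()->start();
  Node* context = UndefinedConstant();
  Node* frame_state = graph()->start();
  // A Number-typed input would reduce directly to NumberCos(p0); a plain
  // primitive input is first converted with PlainPrimitiveToNumber, which is
  // pure and therefore needs no effect wiring.
  Node* p0 = Parameter(Type::PlainPrimitive(), 0);
  Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
                                UndefinedConstant(), p0, context, frame_state,
                                effect, control);
  Reduction r = Reduce(call);

  ASSERT_TRUE(r.Changed());
  EXPECT_THAT(r.replacement(), IsNumberCos(IsPlainPrimitiveToNumber(p0)));
}
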
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 837c5742d9..9c001e9eb2 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-create-lowering.h"
#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
@@ -80,95 +81,74 @@ TEST_F(JSCreateLoweringTest, JSCreate) {
// -----------------------------------------------------------------------------
// JSCreateArguments
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsViaStub) {
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedMapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state = FrameState(shared, graph()->start());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
- closure, context, frame_state, effect, control));
+ javascript()->CreateArguments(CreateArgumentsType::kMappedArguments),
+ closure, context, frame_state_inner, effect));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastNewStrictArguments(isolate()).code()),
- closure, context, frame_state, effect, control));
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSSloppyArgumentsObject::kSize), _, _),
+ _));
}
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsRestParameterViaStub) {
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedUnmapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state = FrameState(shared, graph()->start());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
- closure, context, frame_state, effect, control));
+ javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
+ closure, context, frame_state_inner, effect));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastNewRestParameter(isolate()).code()),
- closure, context, frame_state, effect, control));
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSStrictArgumentsObject::kSize), _, _),
+ _));
}
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedMapped) {
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
Node* const frame_state_outer = FrameState(shared, graph()->start());
Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kMappedArguments),
- closure, context, frame_state_inner, effect, control));
+ javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
+ closure, context, frame_state_inner, effect));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSSloppyArgumentsObject::kSize),
- _, control),
- _));
+ EXPECT_THAT(
+ r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(JSArray::kSize), _, _), _));
}
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedUnmapped) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
- closure, context, frame_state_inner, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSStrictArgumentsObject::kSize),
- _, control),
- _));
-}
+// -----------------------------------------------------------------------------
+// JSCreateClosure
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
- Node* const closure = Parameter(Type::Any());
+TEST_F(JSCreateLoweringTest, JSCreateClosureViaInlinedAllocation) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
- closure, context, frame_state_inner, effect, control));
+ Handle<SharedFunctionInfo> shared(isolate()->number_function()->shared());
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
+ context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSArray::kSize), _, control), _));
+ IsFinishRegion(IsAllocate(IsNumberConstant(JSFunction::kSize),
+ IsBeginRegion(_), control),
+ _));
}
// -----------------------------------------------------------------------------
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index de0eefc531..780bf65df3 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -29,10 +29,9 @@ class JSIntrinsicLoweringTest : public GraphTest {
~JSIntrinsicLoweringTest() override {}
protected:
- Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kNoFlags) {
- MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
- flags);
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(zone(),
+ MachineType::PointerRepresentation());
SimplifiedOperatorBuilder simplified(zone());
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
@@ -43,13 +42,6 @@ class JSIntrinsicLoweringTest : public GraphTest {
return reducer.Reduce(node);
}
- Node* EmptyFrameState() {
- MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
- &machine);
- return jsgraph.EmptyFrameState();
- }
-
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -58,63 +50,6 @@ class JSIntrinsicLoweringTest : public GraphTest {
// -----------------------------------------------------------------------------
-// %_ConstructDouble
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedConstructDouble) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineConstructDouble, 2), input0,
- input1, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64InsertHighWord32(
- IsFloat64InsertLowWord32(
- IsNumberConstant(BitEq(0.0)), input1),
- input0));
-}
-
-
-// -----------------------------------------------------------------------------
-// %_DoubleLo
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleLo) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleLo, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFloat64ExtractLowWord32(IsGuard(Type::Number(), input, _)));
-}
-
-
-// -----------------------------------------------------------------------------
-// %_DoubleHi
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleHi) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleHi, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFloat64ExtractHighWord32(IsGuard(Type::Number(), input, _)));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsSmi
@@ -151,11 +86,11 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
phi,
IsPhi(
MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
effect, _),
- IsInt32Constant(JS_ARRAY_TYPE)),
+ IsNumberConstant(JS_ARRAY_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -182,11 +117,11 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
phi,
IsPhi(
MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
effect, _),
- IsInt32Constant(JS_TYPED_ARRAY_TYPE)),
+ IsNumberConstant(JS_TYPED_ARRAY_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -213,11 +148,11 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
phi,
IsPhi(
MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
effect, _),
- IsInt32Constant(JS_REGEXP_TYPE)),
+ IsNumberConstant(JS_REGEXP_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -240,67 +175,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiver) {
EXPECT_THAT(r.replacement(), IsObjectIsReceiver(input));
}
-
-// -----------------------------------------------------------------------------
-// %_ValueOf
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineValueOf) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineValueOf, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node*> branch0, if_false0, branch1, if_true1;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, input,
- IsPhi(MachineRepresentation::kTagged,
- IsLoadField(AccessBuilder::ForValue(), input, effect,
- CaptureEq(&if_true1)),
- input,
- IsMerge(
- AllOf(CaptureEq(&if_true1), IsIfTrue(CaptureEq(&branch1))),
- IsIfFalse(AllOf(
- CaptureEq(&branch1),
- IsBranch(
- IsWord32Equal(
- IsLoadField(
- AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input,
- effect, CaptureEq(&if_false0)),
- effect, _),
- IsInt32Constant(JS_VALUE_TYPE)),
- CaptureEq(&if_false0)))))),
- IsMerge(
- IsIfTrue(AllOf(CaptureEq(&branch0),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false0), IsIfFalse(CaptureEq(&branch0))))));
-}
-
-// -----------------------------------------------------------------------------
-// %_GetOrdinaryHasInstance
-
-TEST_F(JSIntrinsicLoweringTest, InlineGetOrdinaryHasInstance) {
- Node* const context = Parameter(0);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineGetOrdinaryHasInstance, 0),
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsLoadContext(
- ContextAccess(0, Context::ORDINARY_HAS_INSTANCE_INDEX, true), _));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 15b1427871..3b83d691f1 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -40,25 +40,13 @@ const SharedOperator kSharedOperators[] = {
control_input_count, value_output_count, effect_output_count, \
control_output_count \
}
- SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(StrictEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
- SHARED(StrictNotEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
- SHARED(LessThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToString, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
- SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
- SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
+ SHARED(ToObject, Operator::kFoldable, 1, 1, 1, 1, 1, 1, 2),
SHARED(Create, Operator::kEliminatable, 2, 1, 1, 0, 1, 1, 0),
- SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(TypeOf, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
- SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
- SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
#undef SHARED
};
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 1adb5dae93..72c582525e 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/js-typed-lowering.h"
#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
@@ -65,8 +66,7 @@ const double kIntegerValues[] = {-V8_INFINITY, INT_MIN, -1000.0, -42.0,
Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
Type::Number(), Type::String(), Type::Object()};
-
-STATIC_ASSERT(LANGUAGE_END == 3);
+STATIC_ASSERT(LANGUAGE_END == 2);
const LanguageMode kLanguageModes[] = {SLOPPY, STRICT};
} // namespace
@@ -212,9 +212,8 @@ TEST_F(JSTypedLoweringTest, ParameterWithUndefined) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
Node* input = Parameter(Type::Boolean(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -242,9 +241,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -258,9 +256,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -269,9 +266,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -280,21 +276,28 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
Node* input = Parameter(Type::OrderedNumber(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
}
+TEST_F(JSTypedLoweringTest, JSToBooleanWithNumber) {
+ Node* input = Parameter(Type::Number(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberLessThan(IsNumberConstant(0.0), IsNumberAbs(input)));
+}
TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
Node* input = Parameter(Type::String(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -307,9 +310,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_FALSE(r.Changed());
}
@@ -327,8 +329,7 @@ TEST_F(JSTypedLoweringTest, JSToNumberWithPlainPrimitive) {
Reduce(graph()->NewNode(javascript()->ToNumber(), input, context,
EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsToNumber(input, IsNumberConstant(BitEq(0.0)),
- graph()->start(), control));
+ EXPECT_THAT(r.replacement(), IsPlainPrimitiveToNumber(input));
}
@@ -389,11 +390,13 @@ TEST_F(JSTypedLoweringTest, JSToStringWithBoolean) {
TEST_F(JSTypedLoweringTest, JSStrictEqualWithTheHole) {
Node* const the_hole = HeapConstant(factory()->the_hole_value());
Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
TRACED_FOREACH(Type*, type, kJSTypes) {
Node* const lhs = Parameter(type);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, the_hole,
- context, graph()->start(), graph()->start()));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ lhs, the_hole, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -404,9 +407,11 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
Node* const lhs = Parameter(Type::Unique(), 0);
Node* const rhs = Parameter(Type::Unique(), 1);
Node* const context = Parameter(Type::Any(), 2);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, rhs, context,
- graph()->start(), graph()->start()));
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ lhs, rhs, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsReferenceEqual(Type::Unique(), lhs, rhs));
}
@@ -415,53 +420,95 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
// -----------------------------------------------------------------------------
// JSShiftLeft
-
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftLeft(hints), lhs, NumberConstant(rhs), context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs,
+ NumberConstant(rhs), context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftLeft(lhs, IsNumberConstant(BitEq(rhs))));
}
}
-
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hints), lhs,
- rhs, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftLeft(lhs, rhs));
}
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftLeft(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftLeft(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftLeft(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSShiftRight
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRight(hints), lhs, NumberConstant(rhs), context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ NumberConstant(rhs), context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftRight(lhs, IsNumberConstant(BitEq(rhs))));
@@ -470,19 +517,63 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hints), lhs,
- rhs, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs, rhs, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftRight(lhs, rhs));
}
+TEST_F(JSTypedLoweringTest, JSShiftRightWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftRight(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftRight(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftRight(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSShiftRightLogical
@@ -490,15 +581,15 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
TEST_F(JSTypedLoweringTest,
JSShiftRightLogicalWithUnsigned32AndConstant) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRightLogical(hints), lhs, NumberConstant(rhs),
- context, EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, NumberConstant(rhs), context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftRightLogical(lhs, IsNumberConstant(BitEq(rhs))));
@@ -507,19 +598,63 @@ TEST_F(JSTypedLoweringTest,
TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Unsigned32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hints),
- lhs, rhs, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint), lhs, rhs,
+ context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftRightLogical(lhs, rhs));
}
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftRightLogical(
+ NumberOperationHint::kSignedSmall, lhs, rhs,
+ effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftRightLogical(
+ NumberOperationHint::kSigned32, lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftRightLogical(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSLoadContext
@@ -611,14 +746,15 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadProperty(feedback), base, key, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
+ base, key, vector, context,
+ EmptyFrameState(), effect, control));
Matcher<Node*> offset_matcher =
element_size == 1
? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ : IsNumberShiftLeft(key,
+ IsNumberConstant(WhichPowerOf2(element_size)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -652,9 +788,9 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadProperty(feedback), base, key, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
+ base, key, vector, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -693,14 +829,14 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ : IsNumberShiftLeft(
+ key, IsNumberConstant(WhichPowerOf2(element_size)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -734,21 +870,26 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
+ // TODO(mstarzinger): Once the effect-control-linearizer provides a frame
+ // state we can get rid of this checkpoint again. The reducer won't care.
+ Node* checkpoint = graph()->NewNode(common()->Checkpoint(),
+ EmptyFrameState(), effect, control);
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), checkpoint, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ : IsNumberShiftLeft(
+ key, IsNumberConstant(WhichPowerOf2(element_size)));
Matcher<Node*> value_matcher =
- IsToNumber(value, context, effect, control);
+ IsToNumber(value, context, checkpoint, control);
Matcher<Node*> effect_matcher = value_matcher;
+ Matcher<Node*> control_matcher = IsIfSuccess(value_matcher);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -757,7 +898,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
offset_matcher,
IsNumberConstant(array->byte_length()->Number()),
- value_matcher, effect_matcher, control));
+ value_matcher, effect_matcher, control_matcher));
}
}
}
@@ -787,8 +928,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -814,64 +954,78 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->LoadNamed(name, feedback), receiver, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(name, feedback), receiver,
+ vector, context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
receiver, effect, control));
}
-TEST_F(JSTypedLoweringTest, JSLoadNamedFunctionPrototype) {
- VectorSlotPair feedback;
- Handle<Name> name = factory()->prototype_string();
- Handle<JSFunction> function = isolate()->object_function();
- Handle<JSObject> function_prototype(JSObject::cast(function->prototype()));
- Node* const receiver = Parameter(Type::Constant(function, zone()), 0);
- Node* const vector = Parameter(Type::Internal(), 1);
- Node* const context = Parameter(Type::Internal(), 2);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->LoadNamed(name, feedback), receiver, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(function_prototype));
-}
-
-
// -----------------------------------------------------------------------------
// JSAdd
TEST_F(JSTypedLoweringTest, JSAddWithString) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
- Node* lhs = Parameter(Type::String(), 0);
- Node* rhs = Parameter(Type::String(), 1);
- Node* context = Parameter(Type::Any(), 2);
- Node* frame_state0 = EmptyFrameState();
- Node* frame_state1 = EmptyFrameState();
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Reduction r =
- Reduce(graph()->NewNode(javascript()->Add(hints), lhs, rhs, context,
- frame_state0, frame_state1, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(CodeFactory::StringAdd(
- isolate(), STRING_ADD_CHECK_NONE,
- NOT_TENURED).code()),
- lhs, rhs, context, frame_state0, effect, control));
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
+ Node* lhs = Parameter(Type::String(), 0);
+ Node* rhs = Parameter(Type::String(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Add(hint), lhs, rhs,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::StringAdd(
+ isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED)
+ .code()),
+ lhs, rhs, context, frame_state, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSAddSmis) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 0);
+ Node* rhs = Parameter(Type::Number(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Add(hint), lhs, rhs,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberAdd(NumberOperationHint::kSignedSmall, lhs,
+ rhs, effect, control));
}
+// -----------------------------------------------------------------------------
+// JSSubtract
+
+TEST_F(JSTypedLoweringTest, JSSubtractSmis) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 0);
+ Node* rhs = Parameter(Type::Number(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Subtract(hint), lhs, rhs,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberSubtract(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSInstanceOf
// Test that instanceOf is reduced if and only if the right-hand side is a
// function constant. Functional correctness is ensured elsewhere.
-
TEST_F(JSTypedLoweringTest, JSInstanceOfSpecializationWithoutSmiCheck) {
Node* const context = Parameter(Type::Any());
Node* const frame_state = EmptyFrameState();
@@ -929,6 +1083,150 @@ TEST_F(JSTypedLoweringTest, JSInstanceOfNoSpecialization) {
ASSERT_EQ(instanceOf, dummy->InputAt(0));
}
+// -----------------------------------------------------------------------------
+// JSBitwiseAnd
+
+TEST_F(JSTypedLoweringTest, JSBitwiseAndWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseAnd(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseAnd(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseAndWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseAnd(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseAnd(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseAndWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseAnd(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberBitwiseAnd(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
+
+// -----------------------------------------------------------------------------
+// JSBitwiseOr
+
+TEST_F(JSTypedLoweringTest, JSBitwiseOrWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseOr(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseOr(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseOrWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseOr(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseOr(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseOrWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseOr(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberBitwiseOr(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
+
+// -----------------------------------------------------------------------------
+// JSBitwiseXor
+
+TEST_F(JSTypedLoweringTest, JSBitwiseXorWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseXor(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseXor(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseXorWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseXor(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseXor(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseXorWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseXor(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberBitwiseXor(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 597edde665..741021a446 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -26,27 +26,25 @@ class LinkageTailCall : public TestWithZone {
CallDescriptor* NewStandardCallDescriptor(LocationSignature* locations) {
DCHECK(arraysize(kMachineTypes) >=
locations->return_count() + locations->parameter_count());
- MachineSignature* types = new (zone()) MachineSignature(
- locations->return_count(), locations->parameter_count(), kMachineTypes);
- return new (zone()) CallDescriptor(CallDescriptor::kCallCodeObject,
- MachineType::AnyTagged(),
- LinkageLocation::ForAnyRegister(),
- types, // machine_sig
- locations, // location_sig
- 0, // js_parameter_count
- Operator::kNoProperties, // properties
- 0, // callee-saved
- 0, // callee-saved fp
- CallDescriptor::kNoFlags, // flags,
- "");
+ USE(kMachineTypes);
+ return new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForAnyRegister(MachineType::Pointer()),
+ locations, // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ 0, // callee-saved
+ 0, // callee-saved fp
+ CallDescriptor::kNoFlags, // flags,
+ "");
}
LinkageLocation StackLocation(int loc) {
- return LinkageLocation::ForCallerFrameSlot(-loc);
+ return LinkageLocation::ForCallerFrameSlot(-loc, MachineType::Pointer());
}
LinkageLocation RegisterLocation(int loc) {
- return LinkageLocation::ForRegister(loc);
+ return LinkageLocation::ForRegister(loc, MachineType::Pointer());
}
};
@@ -57,8 +55,9 @@ TEST_F(LinkageTailCall, EmptyToEmpty) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc->CanTailCall(node));
+ const CallDescriptor* callee = CallDescriptorOf(node->op());
+ int stack_param_delta = callee->GetStackParameterDelta(desc);
EXPECT_EQ(0, stack_param_delta);
}
@@ -75,8 +74,8 @@ TEST_F(LinkageTailCall, SameReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -95,9 +94,7 @@ TEST_F(LinkageTailCall, DifferingReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_FALSE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(0, stack_param_delta);
+  EXPECT_FALSE(desc1->CanTailCall(node));
}
@@ -116,8 +113,8 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -137,8 +134,8 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -158,9 +155,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(-1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(1, stack_param_delta);
}
@@ -179,9 +176,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(-1, stack_param_delta);
}
@@ -205,8 +202,8 @@ TEST_F(LinkageTailCall, MatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -231,8 +228,8 @@ TEST_F(LinkageTailCall, NonMatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -258,8 +255,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -286,8 +283,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -314,9 +311,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(-1, stack_param_delta);
}
@@ -342,9 +339,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(-1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(1, stack_param_delta);
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 38bb151dba..ada99b5a7f 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -1,12 +1,19 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/access-builder.h"
#include "src/compiler/load-elimination.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::StrictMock;
namespace v8 {
namespace internal {
@@ -15,61 +22,195 @@ namespace compiler {
class LoadEliminationTest : public TypedGraphTest {
public:
LoadEliminationTest()
- : TypedGraphTest(3), common_(zone()), simplified_(zone()) {}
+ : TypedGraphTest(3),
+ simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), nullptr, simplified(), nullptr) {
+ }
~LoadEliminationTest() override {}
protected:
- Reduction Reduce(Node* node) {
- // TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
- LoadElimination reducer(&graph_reducer, graph(), common());
- return reducer.Reduce(node);
- }
-
+ JSGraph* jsgraph() { return &jsgraph_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- CommonOperatorBuilder* common() { return &common_; }
private:
- CommonOperatorBuilder common_;
SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
};
+TEST_F(LoadEliminationTest, LoadElementAndLoadElement) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* load1 = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ load_elimination.Reduce(load1);
+
+ Node* load2 = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load2, load1, load1, _));
+ Reduction r = load_elimination.Reduce(load2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load1, r.replacement());
+}
-TEST_F(LoadEliminationTest, LoadFieldWithStoreField) {
- Node* object1 = Parameter(Type::Any(), 0);
- Node* object2 = Parameter(Type::Any(), 1);
+TEST_F(LoadEliminationTest, StoreElementAndLoadElement) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
Node* value = Parameter(Type::Any(), 2);
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store = effect =
+ graph()->NewNode(simplified()->StoreElement(access), object, index, value,
+ effect, control);
+ load_elimination.Reduce(store);
+
+ Node* load = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, StoreElementAndStoreFieldAndLoadElement) {
+ Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* control = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ Node* value = Parameter(Type::Any(), 2);
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store1 = effect =
+ graph()->NewNode(simplified()->StoreElement(access), object, index, value,
+ effect, control);
+ load_elimination.Reduce(store1);
+
+ Node* store2 = effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ object, value, effect, control);
+ load_elimination.Reduce(store2);
+
+ Node* load = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store2, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess const access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* load1 = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ load_elimination.Reduce(load1);
+
+ Node* load2 = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load2, load1, load1, _));
+ Reduction r = load_elimination.Reduce(load2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load1, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* value = Parameter(Type::Any(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store = effect = graph()->NewNode(simplified()->StoreField(access),
+ object, value, effect, control);
+ load_elimination.Reduce(store);
+
+ Node* load = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* value = Parameter(Type::Any(), 1);
+ Node* index = Parameter(Type::UnsignedSmall(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store1 = effect = graph()->NewNode(simplified()->StoreField(access),
+ object, value, effect, control);
+ load_elimination.Reduce(store1);
+
+ Node* store2 = effect = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()), object,
+ index, object, effect, control);
+ load_elimination.Reduce(store2);
- FieldAccess access1 = AccessBuilder::ForContextSlot(42);
- Node* store1 = graph()->NewNode(simplified()->StoreField(access1), object1,
- value, effect, control);
- Reduction r1 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
- object1, store1, control));
- ASSERT_TRUE(r1.Changed());
- EXPECT_EQ(value, r1.replacement());
-
- FieldAccess access2 = AccessBuilder::ForMap();
- Node* store2 = graph()->NewNode(simplified()->StoreField(access2), object1,
- object2, store1, control);
- Reduction r2 = Reduce(graph()->NewNode(simplified()->LoadField(access2),
- object1, store2, control));
- ASSERT_TRUE(r2.Changed());
- EXPECT_EQ(object2, r2.replacement());
-
- Node* store3 = graph()->NewNode(
- simplified()->StoreBuffer(BufferAccess(kExternalInt8Array)), object2,
- value, Int32Constant(10), object1, store2, control);
-
- Reduction r3 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
- object2, store3, control));
- ASSERT_FALSE(r3.Changed());
-
- Reduction r4 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
- object1, store3, control));
- ASSERT_TRUE(r4.Changed());
- EXPECT_EQ(value, r4.replacement());
+ Node* load = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store2, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 9db490560d..56691fdeef 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -28,6 +28,7 @@ struct While {
Node* loop;
Node* branch;
Node* if_true;
+ Node* if_false;
Node* exit;
};
@@ -46,6 +47,7 @@ struct Counter {
Node* inc;
Node* phi;
Node* add;
+ Node* exit_marker;
};
@@ -105,12 +107,14 @@ class LoopPeelingTest : public GraphTest {
While NewWhile(Node* cond, Node* control = nullptr) {
if (control == nullptr) control = start();
- Node* loop = graph()->NewNode(common()->Loop(2), control, control);
- Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* exit = graph()->NewNode(common()->IfFalse(), branch);
- loop->ReplaceInput(1, if_true);
- return {loop, branch, if_true, exit};
+ While w;
+ w.loop = graph()->NewNode(common()->Loop(2), control, control);
+ w.branch = graph()->NewNode(common()->Branch(), cond, w.loop);
+ w.if_true = graph()->NewNode(common()->IfTrue(), w.branch);
+ w.if_false = graph()->NewNode(common()->IfFalse(), w.branch);
+ w.exit = graph()->NewNode(common()->LoopExit(), w.if_false, w.loop);
+ w.loop->ReplaceInput(1, w.if_true);
+ return w;
}
void Chain(While* a, Node* control) { a->loop->ReplaceInput(0, control); }
@@ -124,21 +128,24 @@ class LoopPeelingTest : public GraphTest {
}
Branch NewBranch(Node* cond, Node* control = nullptr) {
+ Branch b;
if (control == nullptr) control = start();
- Node* branch = graph()->NewNode(common()->Branch(), cond, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- return {branch, if_true, if_false};
+ b.branch = graph()->NewNode(common()->Branch(), cond, control);
+ b.if_true = graph()->NewNode(common()->IfTrue(), b.branch);
+ b.if_false = graph()->NewNode(common()->IfFalse(), b.branch);
+ return b;
}
Counter NewCounter(While* w, int32_t b, int32_t k) {
- Node* base = Int32Constant(b);
- Node* inc = Int32Constant(k);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), base, base, w->loop);
- Node* add = graph()->NewNode(machine()->Int32Add(), phi, inc);
- phi->ReplaceInput(1, add);
- return {base, inc, phi, add};
+ Counter c;
+ c.base = Int32Constant(b);
+ c.inc = Int32Constant(k);
+ c.phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ c.base, c.base, w->loop);
+ c.add = graph()->NewNode(machine()->Int32Add(), c.phi, c.inc);
+ c.phi->ReplaceInput(1, c.add);
+ c.exit_marker = graph()->NewNode(common()->LoopExitValue(), c.phi, w->exit);
+ return c;
}
};
@@ -152,14 +159,14 @@ TEST_F(LoopPeelingTest, SimpleLoop) {
Node* br1 = ExpectPeeled(w.branch, peeled);
Node* if_true1 = ExpectPeeled(w.if_true, peeled);
- Node* if_false1 = ExpectPeeled(w.exit, peeled);
+ Node* if_false1 = ExpectPeeled(w.if_false, peeled);
EXPECT_THAT(br1, IsBranch(p0, start()));
EXPECT_THAT(if_true1, IsIfTrue(br1));
EXPECT_THAT(if_false1, IsIfFalse(br1));
EXPECT_THAT(w.loop, IsLoop(if_true1, w.if_true));
- EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(w.exit, if_false1)));
+ EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(w.if_false, if_false1)));
}
@@ -167,13 +174,13 @@ TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
Node* p0 = Parameter(0);
While w = NewWhile(p0);
Counter c = NewCounter(&w, 0, 1);
- Node* r = InsertReturn(c.phi, start(), w.exit);
+ Node* r = InsertReturn(c.exit_marker, start(), w.exit);
PeeledIteration* peeled = PeelOne();
Node* br1 = ExpectPeeled(w.branch, peeled);
Node* if_true1 = ExpectPeeled(w.if_true, peeled);
- Node* if_false1 = ExpectPeeled(w.exit, peeled);
+ Node* if_false1 = ExpectPeeled(w.if_false, peeled);
EXPECT_THAT(br1, IsBranch(p0, start()));
EXPECT_THAT(if_true1, IsIfTrue(br1));
@@ -182,11 +189,10 @@ TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
- Capture<Node*> merge;
+ EXPECT_THAT(w.exit, IsMerge(w.if_false, if_false1));
EXPECT_THAT(
- r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
- AllOf(CaptureEq(&merge), IsMerge(w.exit, if_false1))),
- start(), CaptureEq(&merge)));
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base, w.exit),
+ start(), w.exit));
}
@@ -197,13 +203,13 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
Nest(&inner, &outer);
Counter c = NewCounter(&outer, 0, 1);
- Node* r = InsertReturn(c.phi, start(), outer.exit);
+ Node* r = InsertReturn(c.exit_marker, start(), outer.exit);
PeeledIteration* peeled = PeelOne();
Node* bro = ExpectPeeled(outer.branch, peeled);
Node* if_trueo = ExpectPeeled(outer.if_true, peeled);
- Node* if_falseo = ExpectPeeled(outer.exit, peeled);
+ Node* if_falseo = ExpectPeeled(outer.if_false, peeled);
EXPECT_THAT(bro, IsBranch(p0, start()));
EXPECT_THAT(if_trueo, IsIfTrue(bro));
@@ -211,21 +217,21 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
Node* bri = ExpectPeeled(inner.branch, peeled);
Node* if_truei = ExpectPeeled(inner.if_true, peeled);
- Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+ Node* if_falsei = ExpectPeeled(inner.if_false, peeled);
+ Node* exiti = ExpectPeeled(inner.exit, peeled);
EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
EXPECT_THAT(if_truei, IsIfTrue(bri));
EXPECT_THAT(if_falsei, IsIfFalse(bri));
- EXPECT_THAT(outer.loop, IsLoop(if_falsei, inner.exit));
+ EXPECT_THAT(outer.loop, IsLoop(exiti, inner.exit));
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
Capture<Node*> merge;
- EXPECT_THAT(
- r,
- IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
- AllOf(CaptureEq(&merge), IsMerge(outer.exit, if_falseo))),
- start(), CaptureEq(&merge)));
+ EXPECT_THAT(outer.exit, IsMerge(outer.if_false, if_falseo));
+ EXPECT_THAT(r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
+ outer.exit),
+ start(), outer.exit));
}
@@ -236,7 +242,7 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
Nest(&inner, &outer);
Counter c = NewCounter(&outer, 0, 1);
- Node* r = InsertReturn(c.phi, start(), outer.exit);
+ Node* r = InsertReturn(c.exit_marker, start(), outer.exit);
LoopTree* loop_tree = GetLoopTree();
LoopTree::Loop* loop = loop_tree->ContainingLoop(inner.loop);
@@ -248,20 +254,22 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
ExpectNotPeeled(outer.loop, peeled);
ExpectNotPeeled(outer.branch, peeled);
ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.if_false, peeled);
ExpectNotPeeled(outer.exit, peeled);
Node* bri = ExpectPeeled(inner.branch, peeled);
Node* if_truei = ExpectPeeled(inner.if_true, peeled);
- Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+ Node* if_falsei = ExpectPeeled(inner.if_false, peeled);
EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
EXPECT_THAT(if_truei, IsIfTrue(bri));
EXPECT_THAT(if_falsei, IsIfFalse(bri));
- EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(inner.exit, IsMerge(inner.if_false, if_falsei));
+ EXPECT_THAT(outer.loop, IsLoop(start(), inner.exit));
ExpectNotPeeled(c.add, peeled);
- EXPECT_THAT(r, IsReturn(c.phi, start(), outer.exit));
+ EXPECT_THAT(r, IsReturn(c.exit_marker, start(), outer.exit));
}
@@ -271,7 +279,7 @@ TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
While inner = NewWhile(p0);
Nest(&inner, &outer);
Counter c = NewCounter(&inner, 0, 1);
- Node* phi = NewPhi(&outer, Int32Constant(11), c.phi);
+ Node* phi = NewPhi(&outer, Int32Constant(11), c.exit_marker);
Node* r = InsertReturn(phi, start(), outer.exit);
@@ -285,25 +293,26 @@ TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
ExpectNotPeeled(outer.loop, peeled);
ExpectNotPeeled(outer.branch, peeled);
ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.if_false, peeled);
ExpectNotPeeled(outer.exit, peeled);
Node* bri = ExpectPeeled(inner.branch, peeled);
Node* if_truei = ExpectPeeled(inner.if_true, peeled);
- Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+ Node* if_falsei = ExpectPeeled(inner.if_false, peeled);
EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
EXPECT_THAT(if_truei, IsIfTrue(bri));
EXPECT_THAT(if_falsei, IsIfFalse(bri));
- EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(inner.exit, IsMerge(inner.if_false, if_falsei));
+ EXPECT_THAT(outer.loop, IsLoop(start(), inner.exit));
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
- Node* back = phi->InputAt(1);
- EXPECT_THAT(back, IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
- IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(c.exit_marker,
+ IsPhi(MachineRepresentation::kTagged, c.phi, c.base, inner.exit));
EXPECT_THAT(phi, IsPhi(MachineRepresentation::kTagged, IsInt32Constant(11),
- back, outer.loop));
+ c.exit_marker, outer.loop));
EXPECT_THAT(r, IsReturn(phi, start(), outer.exit));
}
@@ -318,7 +327,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoop) {
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
- Node* r = InsertReturn(p0, start(), b1.if_false);
+ Node* exit = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+
+ Node* r = InsertReturn(p0, start(), exit);
PeeledIteration* peeled = PeelOne();
@@ -339,7 +350,8 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoop) {
EXPECT_THAT(b2f, IsIfFalse(b2b));
EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
- EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(b1.if_false, b1f)));
+ EXPECT_THAT(exit, IsMerge(b1.if_false, b1f));
+ EXPECT_THAT(r, IsReturn(p0, start(), exit));
}
@@ -355,7 +367,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
- Node* r = InsertReturn(phi, start(), b1.if_false);
+ Node* exit = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+ Node* exit_marker = graph()->NewNode(common()->LoopExitValue(), phi, exit);
+ Node* r = InsertReturn(exit_marker, start(), exit);
PeeledIteration* peeled = PeelOne();
@@ -383,11 +397,10 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
IsInt32Constant(2), IsMerge(b2t, b2f)),
IsInt32Constant(1), IsInt32Constant(2), loop));
- Capture<Node*> merge;
- EXPECT_THAT(
- r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
- AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
- start(), CaptureEq(&merge)));
+ EXPECT_THAT(exit, IsMerge(b1.if_false, b1f));
+ EXPECT_THAT(exit_marker, IsPhi(MachineRepresentation::kTagged, phi,
+ IsInt32Constant(0), exit));
+ EXPECT_THAT(r, IsReturn(exit_marker, start(), exit));
}
@@ -408,7 +421,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
- Node* r = InsertReturn(phi, start(), b1.if_false);
+ Node* exit = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+ Node* exit_marker = graph()->NewNode(common()->LoopExitValue(), phi, exit);
+ Node* r = InsertReturn(exit_marker, start(), exit);
PeeledIteration* peeled = PeelOne();
@@ -443,49 +458,60 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
IsInt32Add(phi, IsInt32Constant(1)),
IsInt32Add(phi, IsInt32Constant(2)), loop));
- Capture<Node*> merge;
- EXPECT_THAT(
- r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
- AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
- start(), CaptureEq(&merge)));
+ EXPECT_THAT(exit, IsMerge(b1.if_false, b1f));
+ EXPECT_THAT(exit_marker, IsPhi(MachineRepresentation::kTagged, phi,
+ IsInt32Constant(0), exit));
+ EXPECT_THAT(r, IsReturn(exit_marker, start(), exit));
}
-
-TEST_F(LoopPeelingTest, TwoExitLoop_nope) {
+TEST_F(LoopPeelingTest, TwoExitLoop) {
Node* p0 = Parameter(0);
Node* loop = graph()->NewNode(common()->Loop(2), start(), start());
Branch b1 = NewBranch(p0, loop);
Branch b2 = NewBranch(p0, b1.if_true);
loop->ReplaceInput(1, b2.if_true);
- Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, b2.if_false);
- InsertReturn(p0, start(), merge);
- {
- LoopTree* loop_tree = GetLoopTree();
- LoopTree::Loop* loop = loop_tree->outer_loops()[0];
- EXPECT_FALSE(LoopPeeler::CanPeel(loop_tree, loop));
- }
-}
+ Node* exit1 = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+ Node* exit2 = graph()->NewNode(common()->LoopExit(), b2.if_false, loop);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), exit1, exit2);
+ Node* r = InsertReturn(p0, start(), merge);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1p = ExpectPeeled(b1.branch, peeled);
+ Node* if_true1p = ExpectPeeled(b1.if_true, peeled);
+ Node* if_false1p = ExpectPeeled(b1.if_false, peeled);
+ Node* b2p = ExpectPeeled(b2.branch, peeled);
+ Node* if_true2p = ExpectPeeled(b2.if_true, peeled);
+ Node* if_false2p = ExpectPeeled(b2.if_false, peeled);
-const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
- 0, 0, 1, 1, 1, 2);
+ EXPECT_THAT(b1p, IsBranch(p0, start()));
+ EXPECT_THAT(if_true1p, IsIfTrue(b1p));
+ EXPECT_THAT(if_false1p, IsIfFalse(b1p));
+ EXPECT_THAT(b2p, IsBranch(p0, if_true1p));
+ EXPECT_THAT(if_true2p, IsIfTrue(b2p));
+ EXPECT_THAT(if_false2p, IsIfFalse(b2p));
-TEST_F(LoopPeelingTest, TwoExitLoopWithCall_nope) {
+ EXPECT_THAT(exit1, IsMerge(b1.if_false, if_false1p));
+ EXPECT_THAT(exit2, IsMerge(b2.if_false, if_false2p));
+
+ EXPECT_THAT(loop, IsLoop(if_true2p, b2.if_true));
+
+ EXPECT_THAT(merge, IsMerge(exit1, exit2));
+ EXPECT_THAT(r, IsReturn(p0, start(), merge));
+}
+
+TEST_F(LoopPeelingTest, SimpleLoopWithUnmarkedExit) {
Node* p0 = Parameter(0);
Node* loop = graph()->NewNode(common()->Loop(2), start(), start());
- Branch b1 = NewBranch(p0, loop);
-
- Node* call = graph()->NewNode(&kMockCall, b1.if_true);
- Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Branch b = NewBranch(p0, loop);
+ loop->ReplaceInput(1, b.if_true);
- loop->ReplaceInput(1, if_success);
- Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, if_exception);
- InsertReturn(p0, start(), merge);
+ InsertReturn(p0, start(), b.if_false);
{
LoopTree* loop_tree = GetLoopTree();
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 2feba2ef7f..ed426be5d8 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/machine-operator-reducer.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/ieee754.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/typer.h"
#include "src/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -16,6 +17,7 @@ using testing::AllOf;
using testing::BitEq;
using testing::Capture;
using testing::CaptureEq;
+using testing::NanSensitiveDoubleEq;
namespace v8 {
namespace internal {
@@ -236,10 +238,6 @@ const uint32_t kUint32Values[] = {
0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
-const TruncationMode kTruncationModes[] = {TruncationMode::kJavaScript,
- TruncationMode::kRoundToZero};
-
-
struct ComparisonBinaryOperator {
const Operator* (MachineOperatorBuilder::*constructor)();
const char* constructor_name;
@@ -291,7 +289,6 @@ TEST_F(MachineOperatorReducerTest,
EXPECT_EQ(value, reduction.replacement());
}
-
TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
TRACED_FOREACH(int32_t, x, kInt32Values) {
Reduction reduction = Reduce(graph()->NewNode(
@@ -413,51 +410,28 @@ TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
// -----------------------------------------------------------------------------
-// TruncateFloat64ToInt32
-
+// TruncateFloat64ToWord32
TEST_F(MachineOperatorReducerTest,
- TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
- TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
- Node* value = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(mode),
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(value, reduction.replacement());
- }
+ TruncateFloat64ToWord32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToWord32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
}
-
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToWord32WithConstant) {
TRACED_FOREACH(double, x, kFloat64Values) {
Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript),
- Float64Constant(x)));
+ machine()->TruncateFloat64ToWord32(), Float64Constant(x)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
}
}
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithPhi) {
- Node* const p0 = Parameter(0);
- Node* const p1 = Parameter(1);
- Node* const merge = graph()->start();
- TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
- Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(mode),
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), p0,
- p1, merge)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(
- reduction.replacement(),
- IsPhi(MachineRepresentation::kWord32, IsTruncateFloat64ToInt32(p0),
- IsTruncateFloat64ToInt32(p1), merge));
- }
-}
-
-
// -----------------------------------------------------------------------------
// TruncateInt64ToInt32
@@ -485,8 +459,30 @@ TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
// -----------------------------------------------------------------------------
-// Word32And
+// RoundFloat64ToInt32
+
+TEST_F(MachineOperatorReducerTest,
+ RoundFloat64ToInt32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->RoundFloat64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+TEST_F(MachineOperatorReducerTest, RoundFloat64ToInt32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->RoundFloat64ToInt32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(static_cast<int32_t>(x)));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word32And
TEST_F(MachineOperatorReducerTest, Word32AndWithWord32ShlWithConstant) {
Node* const p0 = Parameter(0);
@@ -853,8 +849,24 @@ TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
// -----------------------------------------------------------------------------
-// Word32Shl
+// Word32Shr
+
+TEST_F(MachineOperatorReducerTest, Word32ShrWithWord32And) {
+ Node* const p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ uint32_t mask = (1 << shift) - 1;
+ Node* node = graph()->NewNode(
+ machine()->Word32Shr(),
+ graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
+ Int32Constant(shift));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word32Shl
TEST_F(MachineOperatorReducerTest, Word32ShlWithZeroShift) {
Node* p0 = Parameter(0);
@@ -1271,28 +1283,31 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithInt32SubWithConstantZero) {
TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+ Node* control = graph()->start();
Node* p0 = Parameter(0);
{
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
- Int32Constant(0), p0);
+ Int32Constant(0), p0, control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
{
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
- Int32Constant(0));
+ Int32Constant(0), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
@@ -1300,18 +1315,20 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+ Node* control = graph()->start();
TRACED_FOREACH(int32_t, x, kInt32Values) {
TRACED_FOREACH(int32_t, y, kInt32Values) {
int32_t z;
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
- Int32Constant(x), Int32Constant(y));
+ Int32Constant(x), Int32Constant(y), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(z));
}
@@ -1324,33 +1341,36 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+ Node* control = graph()->start();
Node* p0 = Parameter(0);
- Node* add =
- graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+ Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(), p0,
+ Int32Constant(0), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+ Node* control = graph()->start();
TRACED_FOREACH(int32_t, x, kInt32Values) {
TRACED_FOREACH(int32_t, y, kInt32Values) {
int32_t z;
Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
- Int32Constant(x), Int32Constant(y));
+ Int32Constant(x), Int32Constant(y), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(z));
}
@@ -1359,8 +1379,153 @@ TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
// -----------------------------------------------------------------------------
-// Uint32LessThan
+// Int32MulWithOverflow
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithZero) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(0), p0, control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(0), control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithOne) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(1), p0, control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(1), control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithMinusOne) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(-1), p0, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32SubWithOverflow(IsInt32Constant(0), p0));
+ }
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(-1), control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32SubWithOverflow(IsInt32Constant(0), p0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithTwo) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(2), p0, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32AddWithOverflow(p0, p0));
+ }
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(2), control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32AddWithOverflow(p0, p0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithConstant) {
+ Node* control = graph()->start();
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ TRACED_FOREACH(int32_t, y, kInt32Values) {
+ int32_t z;
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(x), Int32Constant(y), control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedMulOverflow32(x, y, &z)));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Int32LessThan
+
+TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32Or) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Node* word32_or =
+ graph()->NewNode(machine()->Word32Or(), p0, Int32Constant(x));
+ Node* less_than = graph()->NewNode(machine()->Int32LessThan(), word32_or,
+ Int32Constant(0));
+ Reduction r = Reduce(less_than);
+ if (x < 0) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Uint32LessThan
TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
Node* const p0 = Parameter(0);
@@ -1402,10 +1567,266 @@ TEST_F(MachineOperatorReducerTest, Float64MulWithMinusOne) {
}
}
+TEST_F(MachineOperatorReducerTest, Float64SubMinusZeroMinusX) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Sub(), Float64Constant(-0.0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Neg(p0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float32SubMinusZeroMinusX) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float32Sub(), Float32Constant(-0.0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Neg(p0));
+ }
+}
// -----------------------------------------------------------------------------
-// Float64InsertLowWord32
+// Float64Acos
+TEST_F(MachineOperatorReducerTest, Float64AcosWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Acos(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::acos(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Acosh
+
+TEST_F(MachineOperatorReducerTest, Float64AcoshWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Acosh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::acosh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Asin
+
+TEST_F(MachineOperatorReducerTest, Float64AsinWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Asin(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::asin(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Asinh
+
+TEST_F(MachineOperatorReducerTest, Float64AsinhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Asinh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::asinh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atan
+
+TEST_F(MachineOperatorReducerTest, Float64AtanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atan(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atanh
+
+TEST_F(MachineOperatorReducerTest, Float64AtanhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atanh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atanh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atan2
+
+TEST_F(MachineOperatorReducerTest, Float64Atan2WithConstant) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Atan2(), Float64Constant(y), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atan2(y, x))));
+ }
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float64Atan2WithNaN) {
+ Node* const p0 = Parameter(0);
+ Node* const nan = Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), p0, nan));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(nan, r.replacement());
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), nan, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(nan, r.replacement());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Cos
+
+TEST_F(MachineOperatorReducerTest, Float64CosWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Cos(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cos(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Cosh
+
+TEST_F(MachineOperatorReducerTest, Float64CoshWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Cosh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cosh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Exp
+
+TEST_F(MachineOperatorReducerTest, Float64ExpWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Exp(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::exp(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Log
+
+TEST_F(MachineOperatorReducerTest, Float64LogWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Log(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::log(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Log1p
+
+TEST_F(MachineOperatorReducerTest, Float64Log1pWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Log1p(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::log1p(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Sin
+
+TEST_F(MachineOperatorReducerTest, Float64SinWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Sin(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sin(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Sinh
+
+TEST_F(MachineOperatorReducerTest, Float64SinhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Sinh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sinh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Tan
+
+TEST_F(MachineOperatorReducerTest, Float64TanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Tan(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::tan(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Tanh
+
+TEST_F(MachineOperatorReducerTest, Float64TanhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Tanh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::tanh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64InsertLowWord32
TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
TRACED_FOREACH(double, x, kFloat64Values) {
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 59eb484dab..400b05828a 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -208,9 +208,7 @@ const PureOperator kPureOperators[] = {
PURE(Word64Ror, 2, 0, 1), // --
PURE(Word64Equal, 2, 0, 1), // --
PURE(Int32Add, 2, 0, 1), // --
- PURE(Int32AddWithOverflow, 2, 0, 2), // --
PURE(Int32Sub, 2, 0, 1), // --
- PURE(Int32SubWithOverflow, 2, 0, 2), // --
PURE(Int32Mul, 2, 0, 1), // --
PURE(Int32MulHigh, 2, 0, 1), // --
PURE(Int32Div, 2, 1, 1), // --
@@ -249,6 +247,7 @@ const PureOperator kPureOperators[] = {
PURE(Float32Equal, 2, 0, 1), // --
PURE(Float32LessThan, 2, 0, 1), // --
PURE(Float32LessThanOrEqual, 2, 0, 1), // --
+ PURE(Float32Neg, 1, 0, 1), // --
PURE(Float64Abs, 1, 0, 1), // --
PURE(Float64Add, 2, 0, 1), // --
PURE(Float64Sub, 2, 0, 1), // --
@@ -256,6 +255,8 @@ const PureOperator kPureOperators[] = {
PURE(Float64Div, 2, 0, 1), // --
PURE(Float64Mod, 2, 0, 1), // --
PURE(Float64Sqrt, 1, 0, 1), // --
+ PURE(Float64Max, 2, 0, 1), // --
+ PURE(Float64Min, 2, 0, 1), // --
PURE(Float64Equal, 2, 0, 1), // --
PURE(Float64LessThan, 2, 0, 1), // --
PURE(Float64LessThanOrEqual, 2, 0, 1), // --
@@ -264,6 +265,7 @@ const PureOperator kPureOperators[] = {
PURE(Float64ExtractHighWord32, 1, 0, 1), // --
PURE(Float64InsertLowWord32, 2, 0, 1), // --
PURE(Float64InsertHighWord32, 2, 0, 1), // --
+ PURE(Float64Neg, 1, 0, 1), // --
#undef PURE
};
@@ -320,10 +322,6 @@ const OptionalOperatorEntry kOptionalOperators[] = {
&MachineOperatorBuilder::Name, MachineOperatorBuilder::k##Name, #Name, \
value_input_count, control_input_count, value_output_count \
}
- OPTIONAL_ENTRY(Float32Max, 2, 0, 1), // --
- OPTIONAL_ENTRY(Float32Min, 2, 0, 1), // --
- OPTIONAL_ENTRY(Float64Max, 2, 0, 1), // --
- OPTIONAL_ENTRY(Float64Min, 2, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundDown, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTruncate, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 122c398e20..7b5c667261 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -1150,40 +1150,6 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
-TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Max(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Max is `(b < a) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMipsFloat32Max, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Min(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Min is `(a < b) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMipsFloat32Min, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64());
@@ -1192,7 +1158,6 @@ TEST_F(InstructionSelectorTest, Float64Max) {
Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
@@ -1209,7 +1174,6 @@ TEST_F(InstructionSelectorTest, Float64Min) {
Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
@@ -1217,7 +1181,6 @@ TEST_F(InstructionSelectorTest, Float64Min) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index d9cd96f471..c82cb9fe4f 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -67,22 +67,20 @@ struct Conversion {
// Logical instructions.
// ----------------------------------------------------------------------------
-
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kMips64And,
+ {&RawMachineAssembler::Word32And, "Word32And", kMips64And32,
MachineType::Int32()},
{&RawMachineAssembler::Word64And, "Word64And", kMips64And,
MachineType::Int64()},
- {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or,
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or32,
MachineType::Int32()},
{&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or,
MachineType::Int64()},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor,
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor32,
MachineType::Int32()},
{&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor,
MachineType::Int64()}};
-
// ----------------------------------------------------------------------------
// Shift instructions.
// ----------------------------------------------------------------------------
@@ -542,7 +540,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -551,7 +549,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -589,7 +587,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
m.Int32Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -599,7 +597,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
m.Word32Or(m.Parameter(0), m.Parameter(0))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -988,6 +986,89 @@ TEST_F(InstructionSelectorTest, CombineShiftsWithDivMod) {
}
}
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
+ // For each case, test that the conversion is merged into the load
+ // operation.
+ // ChangeInt32ToInt64(Load_Uint8) -> Lbu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lbu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int8) -> Lb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lb, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint16) -> Lhu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lhu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int16) -> Lh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lh, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint32) -> Lw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lw, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int32) -> Lw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lw, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
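The new ChangeInt32ToInt64AfterLoad test documents an instruction-selection fold: the conversion is absorbed into the load because the MIPS64 widening loads (Lb/Lbu/Lh/Lhu/Lw) already produce a correctly extended value. A standalone sketch of why the separate extension is redundant, using plain pointers rather than the selector's StreamBuilder (hypothetical helper names):

#include <cstdint>

// Load_Int8 followed by ChangeInt32ToInt64, written out explicitly.
int64_t LoadInt8ThenExtend(const int8_t* p) {
  int32_t v32 = *p;                  // sign-extending byte load to 32 bits
  return static_cast<int64_t>(v32);  // separate widening step
}

// One sign-extending load (what a single Lb does) yields the same value.
int64_t LoadInt8Widening(const int8_t* p) {
  return *p;
}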
// ----------------------------------------------------------------------------
// Loads and stores.
@@ -1411,40 +1492,6 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
-TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Max(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Max is `(b < a) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Float32Max, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Min(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Min is `(a < b) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Float32Min, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64());
@@ -1453,7 +1500,6 @@ TEST_F(InstructionSelectorTest, Float64Max) {
Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
@@ -1470,7 +1516,6 @@ TEST_F(InstructionSelectorTest, Float64Min) {
Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index 5ccd0c6727..4c69384667 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -106,11 +106,9 @@ TEST_F(MoveOptimizerTest, RemovesRedundant) {
TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
int first_reg_index =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0);
+ RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(0);
int second_reg_index =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(1);
+ RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(1);
StartBlock();
auto first_instr = EmitNop();
diff --git a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
index f0cc407445..45d7427494 100644
--- a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
@@ -30,13 +30,15 @@ class NodeMatcherTest : public GraphTest {
namespace {
template <class Matcher>
-void CheckBaseWithIndexAndDisplacement(Matcher* matcher, Node* index, int scale,
- Node* base, Node* displacement) {
+void CheckBaseWithIndexAndDisplacement(
+ Matcher* matcher, Node* index, int scale, Node* base, Node* displacement,
+ DisplacementMode displacement_mode = kPositiveDisplacement) {
EXPECT_TRUE(matcher->matches());
EXPECT_EQ(index, matcher->index());
EXPECT_EQ(scale, matcher->scale());
EXPECT_EQ(base, matcher->base());
EXPECT_EQ(displacement, matcher->displacement());
+ EXPECT_EQ(displacement_mode, matcher->displacement_mode());
}
} // namespace
@@ -90,6 +92,9 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
const Operator* a_op = machine()->Int32Add();
USE(a_op);
+ const Operator* sub_op = machine()->Int32Sub();
+ USE(sub_op);
+
const Operator* m_op = machine()->Int32Mul();
Node* m1 = graph()->NewNode(m_op, p1, d1);
Node* m2 = graph()->NewNode(m_op, p1, d2);
@@ -354,7 +359,25 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
graph()->NewNode(a_op, s3, graph()->NewNode(a_op, b0, d15)));
CheckBaseWithIndexAndDisplacement(&match43, p1, 3, b0, d15);
- // Check that scales that require using the base address work dorrectly.
+  // S3 + (B0 - D15) -> [p1, 3, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement32Matcher match44(
+ graph()->NewNode(a_op, s3, graph()->NewNode(sub_op, b0, d15)));
+ CheckBaseWithIndexAndDisplacement(&match44, p1, 3, b0, d15,
+ kNegativeDisplacement);
+
+  // B0 + (B1 - D15) -> [b1, 0, b0, d15, true]
+ BaseWithIndexAndDisplacement32Matcher match45(
+ graph()->NewNode(a_op, b0, graph()->NewNode(sub_op, b1, d15)));
+ CheckBaseWithIndexAndDisplacement(&match45, b1, 0, b0, d15,
+ kNegativeDisplacement);
+
+  // (B0 - D15) + S3 -> [p1, 3, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement32Matcher match46(
+ graph()->NewNode(a_op, graph()->NewNode(sub_op, b0, d15), s3));
+ CheckBaseWithIndexAndDisplacement(&match46, p1, 3, b0, d15,
+ kNegativeDisplacement);
}
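The added match44-match46 cases extend the matcher to addresses whose displacement is subtracted rather than added, reported through the new displacement_mode argument. A standalone arithmetic sketch of the address shape being decomposed (plain integers, not IR nodes):

#include <cstdint>

// base + (index << scale) +/- displacement, i.e. [index, scale, base, displacement, mode].
int64_t EffectiveAddress(int64_t base, int64_t index, int scale,
                         int64_t displacement, bool negative_displacement) {
  int64_t address = base + (index << scale);
  return negative_displacement ? address - displacement
                               : address + displacement;
}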
@@ -409,6 +432,9 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
const Operator* a_op = machine()->Int64Add();
USE(a_op);
+ const Operator* sub_op = machine()->Int64Sub();
+ USE(sub_op);
+
const Operator* m_op = machine()->Int64Mul();
Node* m1 = graph()->NewNode(m_op, p1, d1);
Node* m2 = graph()->NewNode(m_op, p1, d2);
@@ -726,8 +752,27 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
BaseWithIndexAndDisplacement64Matcher match50(
graph()->NewNode(a_op, m3, temp));
CheckBaseWithIndexAndDisplacement(&match50, m3, 0, b0, d15);
-}
+  // S3 + (B0 - D15) -> [p1, 3, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement64Matcher match51(
+ graph()->NewNode(a_op, s3, graph()->NewNode(sub_op, b0, d15)));
+ CheckBaseWithIndexAndDisplacement(&match51, p1, 3, b0, d15,
+ kNegativeDisplacement);
+
+  // B0 + (B1 - D15) -> [b1, 0, b0, d15, true]
+ BaseWithIndexAndDisplacement64Matcher match52(
+ graph()->NewNode(a_op, b0, graph()->NewNode(sub_op, b1, d15)));
+ CheckBaseWithIndexAndDisplacement(&match52, b1, 0, b0, d15,
+ kNegativeDisplacement);
+
+  // (B0 - D15) + S3 -> [p1, 3, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement64Matcher match53(
+ graph()->NewNode(a_op, graph()->NewNode(sub_op, b0, d15), s3));
+ CheckBaseWithIndexAndDisplacement(&match53, p1, 3, b0, d15,
+ kNegativeDisplacement);
+}
TEST_F(NodeMatcherTest, BranchMatcher_match) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
diff --git a/deps/v8/test/unittests/compiler/node-properties-unittest.cc b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
index 463948d43f..a18f2032f5 100644
--- a/deps/v8/test/unittests/compiler/node-properties-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
@@ -37,8 +37,6 @@ const Operator kMockOperator(IrOpcode::kDead, Operator::kNoProperties,
const Operator kMockCallOperator(IrOpcode::kCall, Operator::kNoProperties,
"MockCallOperator", 0, 0, 0, 0, 0, 2);
-const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
-
} // namespace
@@ -49,7 +47,7 @@ TEST_F(NodePropertiesTest, ReplaceUses) {
Node* use_value = NewMockNode(common.Return(), node);
Node* use_effect = NewMockNode(common.EffectPhi(1), node);
Node* use_success = NewMockNode(common.IfSuccess(), node);
- Node* use_exception = NewMockNode(common.IfException(kNoHint), effect, node);
+ Node* use_exception = NewMockNode(common.IfException(), effect, node);
Node* r_value = NewMockNode(&kMockOperator);
Node* r_effect = NewMockNode(&kMockOperator);
Node* r_success = NewMockNode(&kMockOperator);
@@ -99,7 +97,7 @@ TEST_F(NodePropertiesTest, CollectControlProjections_Call) {
Node* result[2];
CommonOperatorBuilder common(zone());
Node* call = NewMockNode(&kMockCallOperator);
- Node* if_ex = NewMockNode(common.IfException(kNoHint), call, call);
+ Node* if_ex = NewMockNode(common.IfException(), call, call);
Node* if_ok = NewMockNode(common.IfSuccess(), call);
NodeProperties::CollectControlProjections(call, result, arraysize(result));
EXPECT_EQ(if_ok, result[0]);
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 6e5d39f68d..5620b8bec1 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -612,49 +612,6 @@ class IsEffectPhiMatcher final : public NodeMatcher {
};
-class IsEffectSetMatcher final : public NodeMatcher {
- public:
- IsEffectSetMatcher(const Matcher<Node*>& effect0_matcher,
- const Matcher<Node*>& effect1_matcher)
- : NodeMatcher(IrOpcode::kEffectSet),
- effect0_matcher_(effect0_matcher),
- effect1_matcher_(effect1_matcher) {}
-
- void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
- *os << "), effect0 (";
- effect0_matcher_.DescribeTo(os);
- *os << ") and effect1 (";
- effect1_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- if (!NodeMatcher::MatchAndExplain(node, listener)) return false;
-
- Node* effect0 = NodeProperties::GetEffectInput(node, 0);
- Node* effect1 = NodeProperties::GetEffectInput(node, 1);
-
- {
- // Try matching in the reverse order first.
- StringMatchResultListener value_listener;
- if (effect0_matcher_.MatchAndExplain(effect1, &value_listener) &&
- effect1_matcher_.MatchAndExplain(effect0, &value_listener)) {
- return true;
- }
- }
-
- return PrintMatchAndExplain(effect0, "effect0", effect0_matcher_,
- listener) &&
- PrintMatchAndExplain(effect1, "effect1", effect1_matcher_, listener);
- }
-
- private:
- const Matcher<Node*> effect0_matcher_;
- const Matcher<Node*> effect1_matcher_;
-};
-
-
class IsProjectionMatcher final : public NodeMatcher {
public:
IsProjectionMatcher(const Matcher<size_t>& index_matcher,
@@ -843,6 +800,44 @@ class IsReferenceEqualMatcher final : public NodeMatcher {
const Matcher<Node*> rhs_matcher_;
};
+class IsSpeculativeBinopMatcher final : public NodeMatcher {
+ public:
+ IsSpeculativeBinopMatcher(IrOpcode::Value opcode,
+ const Matcher<NumberOperationHint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode),
+ hint_matcher_(hint_matcher),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ // TODO(bmeurer): The type parameter is currently ignored.
+ PrintMatchAndExplain(OpParameter<NumberOperationHint>(node->op()),
+ "hints", hint_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+ rhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<NumberOperationHint> hint_matcher_;
+ const Matcher<Type*> type_matcher_;
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
class IsAllocateMatcher final : public NodeMatcher {
public:
@@ -1203,132 +1198,140 @@ class IsStoreElementMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsLoadMatcher final : public NodeMatcher {
- public:
- IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kLoad),
- rep_matcher_(rep_matcher),
- base_matcher_(base_matcher),
- index_matcher_(index_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
-
- void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
- *os << " whose rep (";
- rep_matcher_.DescribeTo(os);
- *os << "), base (";
- base_matcher_.DescribeTo(os);
- *os << "), index (";
- index_matcher_.DescribeTo(os);
- *os << "), effect (";
- effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- Node* effect_node = nullptr;
- Node* control_node = nullptr;
- if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
- effect_node = NodeProperties::GetEffectInput(node);
- }
- if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
- control_node = NodeProperties::GetControlInput(node);
- }
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
- rep_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
- base_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "index", index_matcher_, listener) &&
- PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
- listener) &&
- PrintMatchAndExplain(control_node, "control", control_matcher_,
- listener));
- }
-
- private:
- const Matcher<LoadRepresentation> rep_matcher_;
- const Matcher<Node*> base_matcher_;
- const Matcher<Node*> index_matcher_;
- const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
-};
-
-
-class IsStoreMatcher final : public NodeMatcher {
- public:
- IsStoreMatcher(const Matcher<StoreRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kStore),
- rep_matcher_(rep_matcher),
- base_matcher_(base_matcher),
- index_matcher_(index_matcher),
- value_matcher_(value_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
-
- void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
- *os << " whose rep (";
- rep_matcher_.DescribeTo(os);
- *os << "), base (";
- base_matcher_.DescribeTo(os);
- *os << "), index (";
- index_matcher_.DescribeTo(os);
- *os << "), value (";
- value_matcher_.DescribeTo(os);
- *os << "), effect (";
- effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- Node* effect_node = nullptr;
- Node* control_node = nullptr;
- if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
- effect_node = NodeProperties::GetEffectInput(node);
- }
- if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
- control_node = NodeProperties::GetControlInput(node);
- }
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
- rep_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
- base_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "index", index_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "value", value_matcher_, listener) &&
- PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
- listener) &&
- PrintMatchAndExplain(control_node, "control", control_matcher_,
- listener));
- }
-
- private:
- const Matcher<StoreRepresentation> rep_matcher_;
- const Matcher<Node*> base_matcher_;
- const Matcher<Node*> index_matcher_;
- const Matcher<Node*> value_matcher_;
- const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
-};
+#define LOAD_MATCHER(kLoad) \
+ class Is##kLoad##Matcher final : public NodeMatcher { \
+ public: \
+ Is##kLoad##Matcher(const Matcher<kLoad##Representation>& rep_matcher, \
+ const Matcher<Node*>& base_matcher, \
+ const Matcher<Node*>& index_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher) \
+ : NodeMatcher(IrOpcode::k##kLoad), \
+ rep_matcher_(rep_matcher), \
+ base_matcher_(base_matcher), \
+ index_matcher_(index_matcher), \
+ effect_matcher_(effect_matcher), \
+ control_matcher_(control_matcher) {} \
+ \
+ void DescribeTo(std::ostream* os) const final { \
+ NodeMatcher::DescribeTo(os); \
+ *os << " whose rep ("; \
+ rep_matcher_.DescribeTo(os); \
+ *os << "), base ("; \
+ base_matcher_.DescribeTo(os); \
+ *os << "), index ("; \
+ index_matcher_.DescribeTo(os); \
+ *os << "), effect ("; \
+ effect_matcher_.DescribeTo(os); \
+ *os << ") and control ("; \
+ control_matcher_.DescribeTo(os); \
+ *os << ")"; \
+ } \
+ \
+ bool MatchAndExplain(Node* node, \
+ MatchResultListener* listener) const final { \
+ Node* effect_node = nullptr; \
+ Node* control_node = nullptr; \
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) { \
+ effect_node = NodeProperties::GetEffectInput(node); \
+ } \
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
+ control_node = NodeProperties::GetControlInput(node); \
+ } \
+ return (NodeMatcher::MatchAndExplain(node, listener) && \
+ PrintMatchAndExplain(OpParameter<kLoad##Representation>(node), \
+ "rep", rep_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
+ "base", base_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
+ "index", index_matcher_, listener) && \
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_, \
+ listener) && \
+ PrintMatchAndExplain(control_node, "control", control_matcher_, \
+ listener)); \
+ } \
+ \
+ private: \
+ const Matcher<kLoad##Representation> rep_matcher_; \
+ const Matcher<Node*> base_matcher_; \
+ const Matcher<Node*> index_matcher_; \
+ const Matcher<Node*> effect_matcher_; \
+ const Matcher<Node*> control_matcher_; \
+ };
+
+LOAD_MATCHER(Load)
+LOAD_MATCHER(UnalignedLoad)
+
+#define STORE_MATCHER(kStore) \
+ class Is##kStore##Matcher final : public NodeMatcher { \
+ public: \
+ Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \
+ const Matcher<Node*>& base_matcher, \
+ const Matcher<Node*>& index_matcher, \
+ const Matcher<Node*>& value_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher) \
+ : NodeMatcher(IrOpcode::k##kStore), \
+ rep_matcher_(rep_matcher), \
+ base_matcher_(base_matcher), \
+ index_matcher_(index_matcher), \
+ value_matcher_(value_matcher), \
+ effect_matcher_(effect_matcher), \
+ control_matcher_(control_matcher) {} \
+ \
+ void DescribeTo(std::ostream* os) const final { \
+ NodeMatcher::DescribeTo(os); \
+ *os << " whose rep ("; \
+ rep_matcher_.DescribeTo(os); \
+ *os << "), base ("; \
+ base_matcher_.DescribeTo(os); \
+ *os << "), index ("; \
+ index_matcher_.DescribeTo(os); \
+ *os << "), value ("; \
+ value_matcher_.DescribeTo(os); \
+ *os << "), effect ("; \
+ effect_matcher_.DescribeTo(os); \
+ *os << ") and control ("; \
+ control_matcher_.DescribeTo(os); \
+ *os << ")"; \
+ } \
+ \
+ bool MatchAndExplain(Node* node, \
+ MatchResultListener* listener) const final { \
+ Node* effect_node = nullptr; \
+ Node* control_node = nullptr; \
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) { \
+ effect_node = NodeProperties::GetEffectInput(node); \
+ } \
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
+ control_node = NodeProperties::GetControlInput(node); \
+ } \
+ return (NodeMatcher::MatchAndExplain(node, listener) && \
+ PrintMatchAndExplain(OpParameter<kStore##Representation>(node), \
+ "rep", rep_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
+ "base", base_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
+ "index", index_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), \
+ "value", value_matcher_, listener) && \
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_, \
+ listener) && \
+ PrintMatchAndExplain(control_node, "control", control_matcher_, \
+ listener)); \
+ } \
+ \
+ private: \
+ const Matcher<kStore##Representation> rep_matcher_; \
+ const Matcher<Node*> base_matcher_; \
+ const Matcher<Node*> index_matcher_; \
+ const Matcher<Node*> value_matcher_; \
+ const Matcher<Node*> effect_matcher_; \
+ const Matcher<Node*> control_matcher_; \
+ };
+
+STORE_MATCHER(Store)
+STORE_MATCHER(UnalignedStore)
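Replacing the hand-written IsLoadMatcher/IsStoreMatcher classes with LOAD_MATCHER/STORE_MATCHER macros lets the unaligned variants reuse exactly the same matching logic, differing only in opcode and representation type. A toy sketch of that X-macro pattern (simplified to a string check, not the real NodeMatcher hierarchy):

#include <string>

// Each expansion stamps out a checker that differs only in the name it tests for.
#define DEFINE_KIND_CHECK(Kind)              \
  bool Is##Kind(const std::string& opcode) { \
    return opcode == #Kind;                  \
  }

DEFINE_KIND_CHECK(Load)           // defines IsLoad(...)
DEFINE_KIND_CHECK(UnalignedLoad)  // defines IsUnalignedLoad(...)
#undef DEFINE_KIND_CHECK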
class IsStackSlotMatcher final : public NodeMatcher {
public:
@@ -1352,32 +1355,6 @@ class IsStackSlotMatcher final : public NodeMatcher {
const Matcher<MachineRepresentation> rep_matcher_;
};
-class IsGuardMatcher final : public NodeMatcher {
- public:
- IsGuardMatcher(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kGuard),
- type_matcher_(type_matcher),
- value_matcher_(value_matcher),
- control_matcher_(control_matcher) {}
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<Type*>(node->op()), "type",
- type_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
- "value", value_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
- "control", control_matcher_, listener));
- }
-
- private:
- const Matcher<Type*> type_matcher_;
- const Matcher<Node*> value_matcher_;
- const Matcher<Node*> control_matcher_;
-};
-
class IsToNumberMatcher final : public NodeMatcher {
public:
IsToNumberMatcher(const Matcher<Node*>& base_matcher,
@@ -1818,12 +1795,6 @@ Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
}
-Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
- const Matcher<Node*>& effect1_matcher) {
- return MakeMatcher(new IsEffectSetMatcher(effect0_matcher, effect1_matcher));
-}
-
-
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher) {
return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
@@ -2064,13 +2035,6 @@ Matcher<Node*> IsTailCall(
effect_matcher, control_matcher));
}
-Matcher<Node*> IsGuard(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher) {
- return MakeMatcher(
- new IsGuardMatcher(type_matcher, value_matcher, control_matcher));
-}
-
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
@@ -2078,6 +2042,18 @@ Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
new IsReferenceEqualMatcher(type_matcher, lhs_matcher, rhs_matcher));
}
+#define DEFINE_SPECULATIVE_BINOP_MATCHER(opcode) \
+ Matcher<Node*> Is##opcode(const Matcher<NumberOperationHint>& hint_matcher, \
+ const Matcher<Node*>& lhs_matcher, \
+ const Matcher<Node*>& rhs_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher) { \
+ return MakeMatcher(new IsSpeculativeBinopMatcher( \
+ IrOpcode::k##opcode, hint_matcher, lhs_matcher, rhs_matcher, \
+ effect_matcher, control_matcher)); \
+ }
+SPECULATIVE_BINOPS(DEFINE_SPECULATIVE_BINOP_MATCHER);
+#undef DEFINE_SPECULATIVE_BINOP_MATCHER
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
@@ -2154,7 +2130,6 @@ Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
effect_matcher, control_matcher));
}
-
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -2164,6 +2139,15 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
effect_matcher, control_matcher));
}
+Matcher<Node*> IsUnalignedLoad(
+ const Matcher<UnalignedLoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsUnalignedLoadMatcher(rep_matcher, base_matcher,
+ index_matcher, effect_matcher,
+ control_matcher));
+}
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
@@ -2176,6 +2160,16 @@ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
effect_matcher, control_matcher));
}
+Matcher<Node*> IsUnalignedStore(
+ const Matcher<UnalignedStoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsUnalignedStoreMatcher(
+ rep_matcher, base_matcher, index_matcher, value_matcher, effect_matcher,
+ control_matcher));
+}
+
Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher) {
return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
}
@@ -2204,6 +2198,10 @@ Matcher<Node*> IsLoadFramePointer() {
return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
}
+Matcher<Node*> IsLoadParentFramePointer() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kLoadParentFramePointer));
+}
+
#define IS_QUADOP_MATCHER(Name) \
Matcher<Node*> Is##Name( \
const Matcher<Node*>& a_matcher, const Matcher<Node*>& b_matcher, \
@@ -2242,6 +2240,10 @@ IS_BINOP_MATCHER(NumberShiftLeft)
IS_BINOP_MATCHER(NumberShiftRight)
IS_BINOP_MATCHER(NumberShiftRightLogical)
IS_BINOP_MATCHER(NumberImul)
+IS_BINOP_MATCHER(NumberAtan2)
+IS_BINOP_MATCHER(NumberMax)
+IS_BINOP_MATCHER(NumberMin)
+IS_BINOP_MATCHER(NumberPow)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Or)
IS_BINOP_MATCHER(Word32Xor)
@@ -2256,6 +2258,7 @@ IS_BINOP_MATCHER(Word64Sar)
IS_BINOP_MATCHER(Word64Shl)
IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32SubWithOverflow)
IS_BINOP_MATCHER(Int32Add)
IS_BINOP_MATCHER(Int32Sub)
IS_BINOP_MATCHER(Int32Mul)
@@ -2266,8 +2269,6 @@ IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(JSAdd)
-IS_BINOP_MATCHER(Float32Max)
-IS_BINOP_MATCHER(Float32Min)
IS_BINOP_MATCHER(Float32Equal)
IS_BINOP_MATCHER(Float32LessThan)
IS_BINOP_MATCHER(Float32LessThanOrEqual)
@@ -2284,6 +2285,7 @@ IS_BINOP_MATCHER(Float64InsertHighWord32)
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
IS_UNOP_MATCHER(BooleanNot)
+IS_UNOP_MATCHER(TruncateFloat64ToWord32)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
IS_UNOP_MATCHER(ChangeInt32ToFloat64)
@@ -2291,23 +2293,55 @@ IS_UNOP_MATCHER(ChangeInt32ToInt64)
IS_UNOP_MATCHER(ChangeUint32ToFloat64)
IS_UNOP_MATCHER(ChangeUint32ToUint64)
IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
-IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float32Abs)
+IS_UNOP_MATCHER(Float32Neg)
IS_UNOP_MATCHER(Float64Abs)
+IS_UNOP_MATCHER(Float64Neg)
IS_UNOP_MATCHER(Float64Sqrt)
IS_UNOP_MATCHER(Float64RoundDown)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
IS_UNOP_MATCHER(Float64ExtractLowWord32)
IS_UNOP_MATCHER(Float64ExtractHighWord32)
+IS_UNOP_MATCHER(NumberAbs)
+IS_UNOP_MATCHER(NumberAcos)
+IS_UNOP_MATCHER(NumberAcosh)
+IS_UNOP_MATCHER(NumberAsin)
+IS_UNOP_MATCHER(NumberAsinh)
+IS_UNOP_MATCHER(NumberAtan)
+IS_UNOP_MATCHER(NumberAtanh)
+IS_UNOP_MATCHER(NumberCeil)
+IS_UNOP_MATCHER(NumberClz32)
+IS_UNOP_MATCHER(NumberCbrt)
+IS_UNOP_MATCHER(NumberCos)
+IS_UNOP_MATCHER(NumberCosh)
+IS_UNOP_MATCHER(NumberExp)
+IS_UNOP_MATCHER(NumberExpm1)
+IS_UNOP_MATCHER(NumberFloor)
+IS_UNOP_MATCHER(NumberFround)
+IS_UNOP_MATCHER(NumberLog)
+IS_UNOP_MATCHER(NumberLog1p)
+IS_UNOP_MATCHER(NumberLog10)
+IS_UNOP_MATCHER(NumberLog2)
+IS_UNOP_MATCHER(NumberRound)
+IS_UNOP_MATCHER(NumberSign)
+IS_UNOP_MATCHER(NumberSin)
+IS_UNOP_MATCHER(NumberSinh)
+IS_UNOP_MATCHER(NumberSqrt)
+IS_UNOP_MATCHER(NumberTan)
+IS_UNOP_MATCHER(NumberTanh)
+IS_UNOP_MATCHER(NumberTrunc)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
+IS_UNOP_MATCHER(PlainPrimitiveToNumber)
IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
+IS_UNOP_MATCHER(StringFromCharCode)
IS_UNOP_MATCHER(Word32Clz)
IS_UNOP_MATCHER(Word32Ctz)
IS_UNOP_MATCHER(Word32Popcnt)
+IS_UNOP_MATCHER(Word32ReverseBytes)
#undef IS_UNOP_MATCHER
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index dd036c9939..2a24803380 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -6,6 +6,7 @@
#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -33,6 +34,15 @@ class Node;
using ::testing::Matcher;
+#define SPECULATIVE_BINOPS(V) \
+ V(SpeculativeNumberAdd) \
+ V(SpeculativeNumberSubtract) \
+ V(SpeculativeNumberShiftLeft) \
+ V(SpeculativeNumberShiftRight) \
+ V(SpeculativeNumberShiftRightLogical) \
+ V(SpeculativeNumberBitwiseAnd) \
+ V(SpeculativeNumberBitwiseOr) \
+ V(SpeculativeNumberBitwiseXor)
Matcher<Node*> IsDead();
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher);
@@ -97,8 +107,6 @@ Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& merge_matcher);
-Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
- const Matcher<Node*>& effect1_matcher);
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher);
Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
@@ -201,6 +209,18 @@ Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+
+#define DECLARE_SPECULATIVE_BINOP_MATCHER(opcode) \
+ Matcher<Node*> Is##opcode(const Matcher<NumberOperationHint>& hint_matcher, \
+ const Matcher<Node*>& lhs_matcher, \
+ const Matcher<Node*>& rhs_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher);
+SPECULATIVE_BINOPS(DECLARE_SPECULATIVE_BINOP_MATCHER);
+#undef DECLARE_SPECULATIVE_BINOP_MATCHER
+
Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberMultiply(const Matcher<Node*>& lhs_matcher,
@@ -213,6 +233,43 @@ Matcher<Node*> IsNumberShiftRightLogical(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberImul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAbs(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAcos(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAcosh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAsin(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAsinh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtan(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtanh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtan2(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberCbrt(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCeil(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberClz32(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCos(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCosh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberExp(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberExpm1(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberFloor(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberFround(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog1p(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog10(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog2(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberMax(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberMin(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberRound(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberPow(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberSign(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSin(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSinh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSqrt(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTan(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTanh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTrunc(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsStringFromCharCode(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
@@ -257,12 +314,22 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsUnalignedLoad(
+ const Matcher<UnalignedLoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsUnalignedStore(
+ const Matcher<UnalignedStoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
@@ -295,6 +362,8 @@ Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32SubWithOverflow(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32Sub(const Matcher<Node*>& lhs_matcher,
@@ -315,6 +384,7 @@ Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsTruncateFloat64ToWord32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
@@ -322,13 +392,9 @@ Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Neg(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat32Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat32LessThan(const Matcher<Node*>& lhs_matcher,
@@ -342,6 +408,7 @@ Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Abs(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Neg(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
@@ -362,6 +429,8 @@ Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
Matcher<Node*> IsLoadFramePointer();
+Matcher<Node*> IsLoadParentFramePointer();
+Matcher<Node*> IsPlainPrimitiveToNumber(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsInt32PairAdd(const Matcher<Node*>& a_matcher,
const Matcher<Node*>& b_matcher,
@@ -386,11 +455,9 @@ Matcher<Node*> IsWord32PairShr(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> IsWord32PairSar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& mid_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32ReverseBytes(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStackSlot();
-Matcher<Node*> IsGuard(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/node-unittest.cc b/deps/v8/test/unittests/compiler/node-unittest.cc
index 5341f69716..8379e2668d 100644
--- a/deps/v8/test/unittests/compiler/node-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-unittest.cc
@@ -7,7 +7,9 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
+using testing::Contains;
using testing::ElementsAre;
+using testing::ElementsAreArray;
using testing::UnorderedElementsAre;
namespace v8 {
@@ -252,6 +254,10 @@ TEST_F(NodeTest, BigNodes) {
for (int i = 0; i < size; i++) {
EXPECT_EQ(inputs[i], node->InputAt(i));
}
+
+ EXPECT_THAT(n0->uses(), Contains(node));
+ EXPECT_THAT(n1->uses(), Contains(node));
+ EXPECT_THAT(node->inputs(), ElementsAreArray(inputs, size));
}
}
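The BigNodes additions rely on two gmock container matchers: Contains() for membership in the use list and ElementsAreArray() for exact input order. A self-contained usage sketch with plain ints instead of Node* (include paths depend on the build setup):

#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

TEST(MatcherSketch, ContainsAndElementsAreArray) {
  const int inputs[] = {1, 2, 3};
  std::vector<int> uses(inputs, inputs + 3);
  EXPECT_THAT(uses, testing::Contains(2));                  // membership only
  EXPECT_THAT(uses, testing::ElementsAreArray(inputs, 3));  // exact order and size
}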
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
index c5ff90f301..71a726f167 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -678,8 +678,7 @@ TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
Allocate();
// TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
- // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
- // Expand the test once greedy is back online with this facility.
+ // so only var3 is spilled in deferred blocks.
const int var3_reg = 2;
const int var3_slot = 2;
diff --git a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
index 713ee6e742..97cafdb6e6 100644
--- a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "test/unittests/compiler/compiler-test-utils.h"
@@ -135,7 +137,7 @@ TEST_F(SchedulerRPOTest, EntryLoop) {
TEST_F(SchedulerRPOTest, EndLoop) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 2));
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 3, true);
@@ -144,7 +146,7 @@ TEST_F(SchedulerRPOTest, EndLoop) {
TEST_F(SchedulerRPOTest, EndLoopNested) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 2));
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
@@ -318,8 +320,8 @@ TEST_F(SchedulerRPOTest, LoopNest2) {
TEST_F(SchedulerRPOTest, LoopFollow1) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
@@ -338,8 +340,8 @@ TEST_F(SchedulerRPOTest, LoopFollow1) {
TEST_F(SchedulerRPOTest, LoopFollow2) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* S = schedule.NewBasicBlock();
@@ -361,8 +363,8 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
for (int size = 1; size < 5; size++) {
for (int exit = 0; exit < size; exit++) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, size));
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
@@ -381,8 +383,8 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
@@ -414,7 +416,7 @@ TEST_F(SchedulerRPOTest, LoopBackedges1) {
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -437,7 +439,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges1) {
BasicBlock* D = schedule.NewBasicBlock();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -459,7 +461,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges2) {
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -481,7 +483,7 @@ TEST_F(SchedulerRPOTest, LoopOutloops1) {
Schedule schedule(zone());
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
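The scheduler-rpo changes are a mechanical migration from base::SmartPointer to std::unique_ptr; ownership semantics are unchanged. A minimal sketch of the pattern after the migration (TestLoopSketch is a stand-in for the real TestLoop):

#include <memory>

struct TestLoopSketch { int blocks; };

void Sketch() {
  std::unique_ptr<TestLoopSketch> loop(new TestLoopSketch{2});
  // ... use loop->blocks exactly as before ...
}  // loop is destroyed automatically at scope exit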
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index da77bdcb4c..1b57e5f483 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -552,12 +552,10 @@ TARGET_TEST_F(SchedulerTest, CallException) {
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
Node* c1 = graph()->NewNode(&kMockCall, start);
Node* ok1 = graph()->NewNode(common()->IfSuccess(), c1);
- Node* ex1 = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), c1, c1);
+ Node* ex1 = graph()->NewNode(common()->IfException(), c1, c1);
Node* c2 = graph()->NewNode(&kMockCall, ok1);
Node* ok2 = graph()->NewNode(common()->IfSuccess(), c2);
- Node* ex2 = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), c2, c2);
+ Node* ex2 = graph()->NewNode(common()->IfException(), c2, c2);
Node* hdl = graph()->NewNode(common()->Merge(2), ex1, ex2);
Node* m = graph()->NewNode(common()->Merge(2), ok2, hdl);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index f571898107..b21a148718 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -20,10 +20,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-class SimplifiedOperatorReducerTest : public TypedGraphTest {
+class SimplifiedOperatorReducerTest : public GraphTest {
public:
explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
- : TypedGraphTest(num_parameters), simplified_(zone()) {}
+ : GraphTest(num_parameters), simplified_(zone()) {}
~SimplifiedOperatorReducerTest() override {}
protected:
@@ -32,7 +32,8 @@ class SimplifiedOperatorReducerTest : public TypedGraphTest {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
- SimplifiedOperatorReducer reducer(&jsgraph);
+ GraphReducer graph_reducer(zone(), graph());
+ SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
@@ -91,31 +92,15 @@ const int32_t kInt32Values[] = {
1866841746, 2032089723, 2147483647};
-const uint32_t kUint32Values[] = {
- 0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
- 0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
- 0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
- 0x83, 0x88, 0x96, 0x9c, 0xa3, 0xfa,
- 0x7a7, 0x165d, 0x234d, 0x3acb, 0x43a5, 0x4573,
- 0x5b4f, 0x5f14, 0x6996, 0x6c6e, 0x7289, 0x7b9a,
- 0x7bc9, 0x86bb, 0xa839, 0xaa41, 0xb03b, 0xc942,
- 0xce68, 0xcf4c, 0xd3ad, 0xdea3, 0xe90c, 0xed86,
- 0xfba5, 0x172dcc6, 0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
- 0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
- 0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
- 0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
- 0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
- 0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
- 0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
- 0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
- 0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
-
-
const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
+const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
+ CheckForMinusZeroMode::kDontCheckForMinusZero,
+ CheckForMinusZeroMode::kCheckForMinusZero};
+
} // namespace
@@ -150,79 +135,72 @@ TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
// -----------------------------------------------------------------------------
-// ChangeBoolToBit
-
+// ChangeTaggedToBit
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToTaggedWithChangeTaggedToBit) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeBitToBool(),
- graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
+ simplified()->ChangeBitToTagged(),
+ graph()->NewNode(simplified()->ChangeTaggedToBit(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(param0, reduction.replacement());
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToTaggedWithZeroConstant) {
Reduction reduction = Reduce(
- graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
+ graph()->NewNode(simplified()->ChangeBitToTagged(), Int32Constant(0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsFalseConstant());
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToTaggedWithOneConstant) {
Reduction reduction = Reduce(
- graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
+ graph()->NewNode(simplified()->ChangeBitToTagged(), Int32Constant(1)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsTrueConstant());
}
// -----------------------------------------------------------------------------
-// ChangeBoolToBit
-
+// ChangeTaggedToBit
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithFalseConstant) {
Reduction reduction = Reduce(
- graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
+ graph()->NewNode(simplified()->ChangeTaggedToBit(), FalseConstant()));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithTrueConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeTaggedToBit(), TrueConstant()));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithChangeBitToTagged) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeBoolToBit(),
- graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
+ simplified()->ChangeTaggedToBit(),
+ graph()->NewNode(simplified()->ChangeBitToTagged(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(param0, reduction.replacement());
}
-
// -----------------------------------------------------------------------------
// ChangeFloat64ToTagged
-
TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeFloat64ToTagged(mode), Float64Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
+ }
}
}
-
// -----------------------------------------------------------------------------
// ChangeInt32ToTagged
@@ -244,14 +222,15 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToFloat64(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(param0, reduction.replacement());
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+ }
}
-
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToFloat64WithChangeInt32ToTagged) {
Node* param0 = Parameter(0);
@@ -297,18 +276,18 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant) {
// -----------------------------------------------------------------------------
// ChangeTaggedToInt32
-
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToInt32WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+ }
}
-
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToInt32WithChangeInt32ToTagged) {
Node* param0 = Parameter(0);
@@ -320,84 +299,163 @@ TEST_F(SimplifiedOperatorReducerTest,
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
+// -----------------------------------------------------------------------------
+// ChangeTaggedToUint32
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
}
}
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeUint32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant) {
- TRACED_FOREACH(double, nan, kNaNs) {
+
+// -----------------------------------------------------------------------------
+// TruncateTaggedToWord32
+
+TEST_F(SimplifiedOperatorReducerTest,
+       TruncateTaggedToWord32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(), NumberConstant(nan)));
+ simplified()->TruncateTaggedToWord32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ EXPECT_THAT(reduction.replacement(), IsTruncateFloat64ToWord32(param0));
}
}
+TEST_F(SimplifiedOperatorReducerTest, TruncateTaggedToWord32WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->TruncateTaggedToWord32(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+ }
+}
// -----------------------------------------------------------------------------
-// ChangeTaggedToUint32
+// CheckTaggedPointer
-
-TEST_F(SimplifiedOperatorReducerTest,
- ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithChangeBitToTagged) {
Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = graph()->NewNode(simplified()->ChangeBitToTagged(), param0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ simplified()->CheckTaggedPointer(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
+ EXPECT_EQ(value, reduction.replacement());
}
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithHeapConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Handle<HeapObject> kHeapObjects[] = {
+ factory()->empty_string(), factory()->null_value(),
+ factory()->species_symbol(), factory()->undefined_value()};
+ TRACED_FOREACH(Handle<HeapObject>, object, kHeapObjects) {
+ Node* value = HeapConstant(object);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedPointer(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckTaggedSigned
TEST_F(SimplifiedOperatorReducerTest,
- ChangeTaggedToUint32WithChangeUint32ToTagged) {
+ CheckTaggedSignedWithChangeInt31ToTaggedSigned) {
Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value =
+ graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(),
- graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ simplified()->CheckTaggedSigned(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(param0, reduction.replacement());
+ EXPECT_EQ(value, reduction.replacement());
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
- }
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedSignedWithNumberConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = NumberConstant(1.0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedSigned(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
}
+// -----------------------------------------------------------------------------
+// NumberAbs
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant) {
- TRACED_FOREACH(double, nan, kNaNs) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(), NumberConstant(nan)));
+TEST_F(SimplifiedOperatorReducerTest, NumberAbsWithNumberConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->NumberAbs(), NumberConstant(n)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(std::fabs(n)));
}
}
-
// -----------------------------------------------------------------------------
-// ChangeUint32ToTagged
+// ObjectIsSmi
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithChangeBitToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ObjectIsSmi(),
+ graph()->NewNode(simplified()->ChangeBitToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+TEST_F(SimplifiedOperatorReducerTest,
+ ObjectIsSmiWithChangeInt31ToTaggedSigned) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ObjectIsSmi(),
+ graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
-TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
- TRACED_FOREACH(uint32_t, n, kUint32Values) {
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithHeapConstant) {
+ Handle<HeapObject> kHeapObjects[] = {
+ factory()->empty_string(), factory()->null_value(),
+ factory()->species_symbol(), factory()->undefined_value()};
+ TRACED_FOREACH(Handle<HeapObject>, o, kHeapObjects) {
Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
- Int32Constant(bit_cast<int32_t>(n))));
+ Reduce(graph()->NewNode(simplified()->ObjectIsSmi(), HeapConstant(o)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+ }
+}
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithNumberConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ObjectIsSmi(), NumberConstant(n)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(FastUI2D(n))));
+ EXPECT_THAT(reduction.replacement(), IsBooleanConstant(IsSmiDouble(n)));
}
}
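
The last test above folds ObjectIsSmi(NumberConstant(n)) to a boolean via IsSmiDouble(n). As a rough sketch of what such a predicate checks — an integral double, not minus zero, within the Smi payload range — assuming a 31-bit payload as on 32-bit targets (64-bit configurations may widen this; this is not V8's definition):

#include <cmath>

bool IsSmiDoubleSketch(double value) {
  const double kSmiMin = -(1 << 30);       // assumes a 31-bit Smi payload
  const double kSmiMax = (1 << 30) - 1;
  if (!(value >= kSmiMin && value <= kSmiMax)) return false;  // also rejects NaN
  if (value == 0.0 && std::signbit(value)) return false;      // reject -0
  return value == std::floor(value);                          // integral values only
}
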
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index bd8509ff97..febd76a528 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -31,7 +31,6 @@ std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
return os << IrOpcode::Mnemonic(pop.opcode);
}
-
const PureOperator kPureOperators[] = {
#define PURE(Name, properties, input_count) \
{ \
@@ -39,7 +38,6 @@ const PureOperator kPureOperators[] = {
Operator::kPure | properties, input_count \
}
PURE(BooleanNot, Operator::kNoProperties, 1),
- PURE(BooleanToNumber, Operator::kNoProperties, 1),
PURE(NumberEqual, Operator::kCommutative, 2),
PURE(NumberLessThan, Operator::kNoProperties, 2),
PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
@@ -56,15 +54,15 @@ const PureOperator kPureOperators[] = {
PURE(NumberShiftRightLogical, Operator::kNoProperties, 2),
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
- PURE(PlainPrimitiveToNumber, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
- PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
- PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
- PURE(ChangeBitToBool, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedToBit, Operator::kNoProperties, 1),
+ PURE(ChangeBitToTagged, Operator::kNoProperties, 1),
+ PURE(TruncateTaggedToWord32, Operator::kNoProperties, 1),
PURE(ObjectIsNumber, Operator::kNoProperties, 1),
PURE(ObjectIsReceiver, Operator::kNoProperties, 1),
PURE(ObjectIsSmi, Operator::kNoProperties, 1)
@@ -156,7 +154,8 @@ TEST_P(SimplifiedBufferAccessOperatorTest, LoadBuffer) {
const Operator* op = simplified.LoadBuffer(access);
EXPECT_EQ(IrOpcode::kLoadBuffer, op->opcode());
- EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ op->properties());
EXPECT_EQ(access, BufferAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
@@ -176,7 +175,8 @@ TEST_P(SimplifiedBufferAccessOperatorTest, StoreBuffer) {
const Operator* op = simplified.StoreBuffer(access);
EXPECT_EQ(IrOpcode::kStoreBuffer, op->opcode());
- EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ op->properties());
EXPECT_EQ(access, BufferAccessOf(op));
EXPECT_EQ(4, op->ValueInputCount());
@@ -203,39 +203,46 @@ namespace {
const ElementAccess kElementAccesses[] = {
{kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Int8()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Int16()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Int32()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Uint8()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Uint16()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Uint32()},
- {kUntaggedBase, 0, Type::Signed32(), MachineType::Int8()},
- {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint8()},
- {kUntaggedBase, 0, Type::Signed32(), MachineType::Int16()},
- {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint16()},
- {kUntaggedBase, 0, Type::Signed32(), MachineType::Int32()},
- {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint32()},
+ MachineType::AnyTagged(), kFullWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int8(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int16(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int32(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint8(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint16(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint32(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int8(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint8(),
+ kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int16(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint16(),
+ kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int32(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint32(),
+ kNoWriteBarrier},
{kUntaggedBase, 0, Type::Number(),
- MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
+ kNoWriteBarrier},
{kUntaggedBase, 0, Type::Number(),
- MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone)},
+ MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone),
+ kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int8()},
+ MachineType::Int8(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- MachineType::Uint8()},
+ MachineType::Uint8(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int16()},
+ MachineType::Int16(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- MachineType::Uint16()},
+ MachineType::Uint16(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int32()},
+ MachineType::Int32(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- MachineType::Uint32()},
+ MachineType::Uint32(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
+ kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)}};
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
+ kNoWriteBarrier}};
} // namespace
@@ -251,7 +258,8 @@ TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
const Operator* op = simplified.LoadElement(access);
EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
- EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(2, op->ValueInputCount());
@@ -271,7 +279,8 @@ TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
const Operator* op = simplified.StoreElement(access);
EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
- EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
diff --git a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
index 3441c68b96..56fedeeb09 100644
--- a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
@@ -26,14 +26,12 @@ class TailCallOptimizationTest : public GraphTest {
TEST_F(TailCallOptimizationTest, CallCodeObject0) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
Node* p0 = Parameter(0);
@@ -48,14 +46,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject0) {
TEST_F(TailCallOptimizationTest, CallCodeObject1) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
@@ -63,8 +59,7 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Node* if_exception = graph()->NewNode(common()->IfException(), call, call);
Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
Node* end = graph()->NewNode(common()->End(1), if_exception);
graph()->SetEnd(end);
@@ -74,14 +69,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
TEST_F(TailCallOptimizationTest, CallCodeObject2) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
@@ -98,14 +91,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject2) {
TEST_F(TailCallOptimizationTest, CallJSFunction0) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
Node* p0 = Parameter(0);
@@ -120,14 +111,12 @@ TEST_F(TailCallOptimizationTest, CallJSFunction0) {
TEST_F(TailCallOptimizationTest, CallJSFunction1) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
@@ -135,8 +124,7 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Node* if_exception = graph()->NewNode(common()->IfException(), call, call);
Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
Node* end = graph()->NewNode(common()->End(1), if_exception);
graph()->SetEnd(end);
@@ -146,14 +134,11 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
TEST_F(TailCallOptimizationTest, CallJSFunction2) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 9d664a6d3a..ca5c1cae45 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -51,7 +51,7 @@ class TyperTest : public TypedGraphTest {
Types types_;
JSOperatorBuilder javascript_;
- BinaryOperationHints const hints_ = BinaryOperationHints::Any();
+ BinaryOperationHint const hints_ = BinaryOperationHint::kAny;
Node* context_node_;
v8::base::RandomNumberGenerator* rng_;
std::vector<double> integers;
@@ -290,44 +290,51 @@ TEST_F(TyperTest, TypeJSShiftRight) {
TEST_F(TyperTest, TypeJSLessThan) {
- TestBinaryCompareOp(javascript_.LessThan(), std::less<double>());
+ TestBinaryCompareOp(javascript_.LessThan(CompareOperationHint::kAny),
+ std::less<double>());
}
TEST_F(TyperTest, TypeJSLessThanOrEqual) {
- TestBinaryCompareOp(javascript_.LessThanOrEqual(), std::less_equal<double>());
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(CompareOperationHint::kAny),
+ std::less_equal<double>());
}
TEST_F(TyperTest, TypeJSGreaterThan) {
- TestBinaryCompareOp(javascript_.GreaterThan(), std::greater<double>());
+ TestBinaryCompareOp(javascript_.GreaterThan(CompareOperationHint::kAny),
+ std::greater<double>());
}
TEST_F(TyperTest, TypeJSGreaterThanOrEqual) {
- TestBinaryCompareOp(javascript_.GreaterThanOrEqual(),
- std::greater_equal<double>());
+ TestBinaryCompareOp(
+ javascript_.GreaterThanOrEqual(CompareOperationHint::kAny),
+ std::greater_equal<double>());
}
TEST_F(TyperTest, TypeJSEqual) {
- TestBinaryCompareOp(javascript_.Equal(), std::equal_to<double>());
+ TestBinaryCompareOp(javascript_.Equal(CompareOperationHint::kAny),
+ std::equal_to<double>());
}
TEST_F(TyperTest, TypeJSNotEqual) {
- TestBinaryCompareOp(javascript_.NotEqual(), std::not_equal_to<double>());
+ TestBinaryCompareOp(javascript_.NotEqual(CompareOperationHint::kAny),
+ std::not_equal_to<double>());
}
// For numbers there's no difference between strict and non-strict equality.
TEST_F(TyperTest, TypeJSStrictEqual) {
- TestBinaryCompareOp(javascript_.StrictEqual(), std::equal_to<double>());
+ TestBinaryCompareOp(javascript_.StrictEqual(CompareOperationHint::kAny),
+ std::equal_to<double>());
}
TEST_F(TyperTest, TypeJSStrictNotEqual) {
- TestBinaryCompareOp(javascript_.StrictNotEqual(),
+ TestBinaryCompareOp(javascript_.StrictNotEqual(CompareOperationHint::kAny),
std::not_equal_to<double>());
}
@@ -335,10 +342,9 @@ TEST_F(TyperTest, TypeJSStrictNotEqual) {
//------------------------------------------------------------------------------
// Monotonicity
-
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity(javascript_.name()); \
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(CompareOperationHint::kAny)); \
}
TEST_BINARY_MONOTONICITY(Equal)
TEST_BINARY_MONOTONICITY(NotEqual)
@@ -350,9 +356,9 @@ TEST_BINARY_MONOTONICITY(LessThanOrEqual)
TEST_BINARY_MONOTONICITY(GreaterThanOrEqual)
#undef TEST_BINARY_MONOTONICITY
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity(javascript_.name(BinaryOperationHints::Any())); \
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(BinaryOperationHint::kAny)); \
}
TEST_BINARY_MONOTONICITY(BitwiseOr)
TEST_BINARY_MONOTONICITY(BitwiseXor)
diff --git a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
index c003033940..c04d6609a7 100644
--- a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -28,7 +28,8 @@ static const TestOperator kOp1(1, Operator::kIdempotent, 1, 1);
class ValueNumberingReducerTest : public TestWithZone {
public:
- ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
+ ValueNumberingReducerTest()
+ : graph_(zone()), reducer_(zone(), graph()->zone()) {}
protected:
Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index d6ed73266c..540c5e71c2 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -33,7 +33,6 @@ TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
}
-
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
@@ -71,6 +70,41 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
}
+namespace {
+struct LoadWithToInt64Extension {
+ MachineType type;
+ ArchOpcode expected_opcode;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const LoadWithToInt64Extension& i32toi64) {
+ return os << i32toi64.type;
+}
+
+static const LoadWithToInt64Extension kLoadWithToInt64Extensions[] = {
+ {MachineType::Int8(), kX64Movsxbq},
+ {MachineType::Uint8(), kX64Movzxbq},
+ {MachineType::Int16(), kX64Movsxwq},
+ {MachineType::Uint16(), kX64Movzxwq},
+ {MachineType::Int32(), kX64Movsxlq}};
+
+} // namespace
+
+typedef InstructionSelectorTestWithParam<LoadWithToInt64Extension>
+ InstructionSelectorChangeInt32ToInt64Test;
+
+TEST_P(InstructionSelectorChangeInt32ToInt64Test, ChangeInt32ToInt64WithLoad) {
+ const LoadWithToInt64Extension extension = GetParam();
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer());
+ m.Return(m.ChangeInt32ToInt64(m.Load(extension.type, m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(extension.expected_opcode, s[0]->arch_opcode());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorChangeInt32ToInt64Test,
+ ::testing::ValuesIn(kLoadWithToInt64Extensions));
// -----------------------------------------------------------------------------
// Loads and stores
@@ -1134,71 +1168,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
-
-TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -1334,6 +1303,55 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const load = m.Load(MachineType::Uint64(), p0);
+ Node* const shift = m.Word64Shr(load, m.Int32Constant(32));
+ m.Return(shift);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const load = m.Load(MachineType::Int64(), p0);
+ Node* const shift = m.Word64Sar(load, m.Int32Constant(32));
+ m.Return(shift);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const load = m.Load(MachineType::Int64(), p0);
+ Node* const shift = m.Word64Sar(load, m.Int32Constant(32));
+ Node* const truncate = m.TruncateInt64ToInt32(shift);
+ m.Return(truncate);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output()));
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
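
The new LoadAndWord64ShiftRight32 cases pin down a peephole in the x64 selector: a 64-bit load whose result is only used shifted right by 32 becomes a single 32-bit load at displacement 4 (kX64Movl for the logical shift, kX64Movsxlq when the arithmetic shift feeds a 64-bit use), which is why each case expects 4 as the second input. A standalone sketch of the little-endian equivalence this relies on, with ad hoc names:

#include <cstdint>
#include <cstring>

uint32_t UpperHalfViaShift(const void* p) {
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));           // 64-bit load
  return static_cast<uint32_t>(v >> 32);   // keep only the upper 32 bits
}

uint32_t UpperHalfViaNarrowLoad(const void* p) {
  uint32_t v;                              // 32-bit load at base + 4
  std::memcpy(&v, static_cast<const char*>(p) + 4, sizeof(v));
  return v;                                // equal on little-endian targets
}
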
diff --git a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
new file mode 100644
index 0000000000..27485db67e
--- /dev/null
+++ b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Test enabled only on supported architectures.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_ARM64)
+
+using namespace v8::internal;
+
+namespace {
+
+class EhFrameIteratorTest : public testing::Test {};
+
+} // namespace
+
+TEST_F(EhFrameIteratorTest, Values) {
+ // Assuming little endian.
+ static const byte kEncoded[] = {0xde, 0xc0, 0xad, 0xde, 0xef, 0xbe, 0xff};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(0xdeadc0de, iterator.GetNextUInt32());
+ EXPECT_EQ(0xbeef, iterator.GetNextUInt16());
+ EXPECT_EQ(0xff, iterator.GetNextByte());
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, Skip) {
+ static const byte kEncoded[] = {0xde, 0xad, 0xc0, 0xde};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ iterator.Skip(2);
+ EXPECT_EQ(2, iterator.GetCurrentOffset());
+ EXPECT_EQ(0xc0, iterator.GetNextByte());
+ iterator.Skip(1);
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, ULEB128Decoding) {
+ static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(624485, iterator.GetNextULeb128());
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, SLEB128DecodingPositive) {
+ static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(624485, iterator.GetNextSLeb128());
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, SLEB128DecodingNegative) {
+ static const byte kEncoded[] = {0x9b, 0xf1, 0x59};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(-624485, iterator.GetNextSLeb128());
+ EXPECT_TRUE(iterator.Done());
+}
+
+#endif
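
The ULEB128/SLEB128 cases decode the classic DWARF example bytes: {0xe5, 0x8e, 0x26} is 624485 both unsigned and signed, while {0x9b, 0xf1, 0x59} is -624485. A self-contained sketch of the decoding rules those expectations follow (simplified, sufficient for the short sequences used here; not V8's implementation):

#include <cstddef>
#include <cstdint>

uint32_t DecodeULeb128(const uint8_t* p, size_t* length) {
  uint32_t result = 0;
  int shift = 0;
  size_t i = 0;
  uint8_t byte;
  do {
    byte = p[i++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;  // 7 payload bits per byte
    shift += 7;
  } while (byte & 0x80);                                    // high bit set: more bytes follow
  *length = i;
  return result;  // {0xe5, 0x8e, 0x26} -> 624485
}

int32_t DecodeSLeb128(const uint8_t* p, size_t* length) {
  uint32_t result = 0;
  int shift = 0;
  size_t i = 0;
  uint8_t byte;
  do {
    byte = p[i++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  if (shift < 32 && (byte & 0x40) != 0) {
    result |= ~0u << shift;  // sign-extend from bit 6 of the last byte
  }
  *length = i;
  return static_cast<int32_t>(result);  // {0x9b, 0xf1, 0x59} -> -624485
}
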
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
new file mode 100644
index 0000000000..98503986aa
--- /dev/null
+++ b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
@@ -0,0 +1,464 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+#include "test/unittests/test-utils.h"
+
+// Test enabled only on supported architectures.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_ARM64)
+
+using namespace v8::internal;
+
+namespace {
+
+class EhFrameWriterTest : public TestWithZone {
+ protected:
+ // Being a 7bit positive integer, this also serves as its ULEB128 encoding.
+ static const int kTestRegisterCode = 0;
+
+ static EhFrameIterator MakeIterator(EhFrameWriter* writer) {
+ CodeDesc desc;
+ writer->GetEhFrame(&desc);
+ DCHECK_GT(desc.unwinding_info_size, 0);
+ return EhFrameIterator(desc.unwinding_info,
+ desc.unwinding_info + desc.unwinding_info_size);
+ }
+};
+
+const int EhFrameWriterTest::kTestRegisterCode;
+
+} // namespace
+
+TEST_F(EhFrameWriterTest, Alignment) {
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(42 * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ ASSERT_EQ(0, EhFrameConstants::kEhFrameHdrSize % 4);
+ ASSERT_EQ(0, EhFrameConstants::kEhFrameTerminatorSize % 4);
+ EXPECT_EQ(0, (iterator.GetBufferSize() - EhFrameConstants::kEhFrameHdrSize -
+ EhFrameConstants::kEhFrameTerminatorSize) %
+ kPointerSize);
+}
+
+TEST_F(EhFrameWriterTest, FDEHeader) {
+ static const int kProcedureSize = 0x5678abcd;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.Finish(kProcedureSize);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ int cie_size = iterator.GetNextUInt32();
+ iterator.Skip(cie_size);
+
+ int fde_size = iterator.GetNextUInt32();
+ EXPECT_EQ(iterator.GetBufferSize(),
+ fde_size + cie_size + EhFrameConstants::kEhFrameTerminatorSize +
+ EhFrameConstants::kEhFrameHdrSize + 2 * kInt32Size);
+
+ int backwards_offset_to_cie_offset = iterator.GetCurrentOffset();
+ int backwards_offset_to_cie = iterator.GetNextUInt32();
+ EXPECT_EQ(backwards_offset_to_cie_offset, backwards_offset_to_cie);
+
+ int procedure_address_offset = iterator.GetCurrentOffset();
+ int procedure_address = iterator.GetNextUInt32();
+ EXPECT_EQ(-(procedure_address_offset + RoundUp(kProcedureSize, 8)),
+ procedure_address);
+
+ int procedure_size = iterator.GetNextUInt32();
+ EXPECT_EQ(kProcedureSize, procedure_size);
+}
+
+TEST_F(EhFrameWriterTest, SetOffset) {
+ static const int kOffset = 0x0badc0de;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressOffset(kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaOffset,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, IncreaseOffset) {
+ static const int kFirstOffset = 121;
+ static const int kSecondOffset = 16;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressOffset(kFirstOffset);
+ writer.IncreaseBaseAddressOffset(kSecondOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaOffset,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset, iterator.GetNextULeb128());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaOffset,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset + kSecondOffset, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, SetRegister) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressRegister(test_register);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaRegister,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, SetRegisterAndOffset) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+ static const int kOffset = 0x0badc0de;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressRegisterAndOffset(test_register, kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfa, iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(kOffset, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding6bit) {
+ static const int kOffset = 42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((1 << 6) | kOffset, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding6bitDelta) {
+ static const int kFirstOffset = 42;
+ static const int kSecondOffset = 62;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((1 << 6) | kFirstOffset, iterator.GetNextByte());
+ EXPECT_EQ((1 << 6) | (kSecondOffset - kFirstOffset), iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding8bit) {
+ static const int kOffset = 0x42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding8bitDelta) {
+ static const int kFirstOffset = 0x10;
+ static const int kSecondOffset = 0x70;
+ static const int kThirdOffset = 0xb5;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kThirdOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((1 << 6) | kFirstOffset, iterator.GetNextByte());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kSecondOffset - kFirstOffset, iterator.GetNextByte());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kThirdOffset - kSecondOffset, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding16bit) {
+ static const int kOffset = kMaxUInt8 + 42;
+ ASSERT_LT(kOffset, kMaxUInt16);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc2,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextUInt16());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding16bitDelta) {
+ static const int kFirstOffset = 0x41;
+ static const int kSecondOffset = kMaxUInt8 + 0x42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset, iterator.GetNextByte());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc2,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kSecondOffset - kFirstOffset, iterator.GetNextUInt16());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding32bit) {
+ static const int kOffset = kMaxUInt16 + 42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc4,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextUInt32());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding32bitDelta) {
+ static const int kFirstOffset = kMaxUInt16 + 0x42;
+ static const int kSecondOffset = kMaxUInt16 + 0x67;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc4,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset, iterator.GetNextUInt32());
+
+ EXPECT_EQ((1 << 6) | (kSecondOffset - kFirstOffset), iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, SaveRegisterUnsignedOffset) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+ static const int kOffset =
+ EhFrameConstants::kDataAlignmentFactor > 0 ? 12344 : -12344;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterSavedToStack(test_register, kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((2 << 6) | kTestRegisterCode, iterator.GetNextByte());
+ EXPECT_EQ(kOffset / EhFrameConstants::kDataAlignmentFactor,
+ iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, SaveRegisterSignedOffset) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+ static const int kOffset =
+ EhFrameConstants::kDataAlignmentFactor < 0 ? 12344 : -12344;
+
+ ASSERT_EQ(kOffset % EhFrameConstants::kDataAlignmentFactor, 0);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterSavedToStack(test_register, kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kOffsetExtendedSf,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(kOffset / EhFrameConstants::kDataAlignmentFactor,
+ iterator.GetNextSLeb128());
+}
+
+TEST_F(EhFrameWriterTest, RegisterNotModified) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterNotModified(test_register);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kSameValue,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, RegisterFollowsInitialRule) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterFollowsInitialRule(test_register);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((3 << 6) | kTestRegisterCode, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, EhFrameHdrLayout) {
+ static const int kCodeSize = 10;
+ static const int kPaddingSize = 6;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.Finish(kCodeSize);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+
+ // Skip the .eh_frame.
+
+ int encoded_cie_size = iterator.GetNextUInt32();
+ iterator.Skip(encoded_cie_size);
+ int cie_size = encoded_cie_size + kInt32Size;
+
+ int encoded_fde_size = iterator.GetNextUInt32();
+ iterator.Skip(encoded_fde_size);
+ int fde_size = encoded_fde_size + kInt32Size;
+
+ iterator.Skip(EhFrameConstants::kEhFrameTerminatorSize);
+
+ int eh_frame_size =
+ cie_size + fde_size + EhFrameConstants::kEhFrameTerminatorSize;
+
+ //
+ // Plugging some numbers in the DSO layout shown in eh-frame.cc:
+ //
+ // | ... |
+ // +---------------+ <-- (E) ---------
+ // | | ^
+ // | Instructions | 10 bytes | .text
+ // | | v
+ // +---------------+ <----------------
+ // |///////////////|
+ // |////Padding////| 6 bytes
+ // |///////////////|
+ // +---------------+ <---(D)----------
+ // | | ^
+ // | CIE | cie_size bytes* |
+ // | | |
+ // +---------------+ <-- (C) |
+ // | | | .eh_frame
+ // | FDE | fde_size bytes |
+ // | | |
+ // +---------------+ |
+ // | terminator | 4 bytes v
+ // +---------------+ <-- (B) ---------
+ // | version | ^
+ // +---------------+ 4 bytes |
+ // | encoding | |
+ // | specifiers | |
+ // +---------------+ <---(A) | .eh_frame_hdr
+ // | offset to | |
+ // | .eh_frame | |
+ // +---------------+ |
+ // | ... | ...
+ //
+ // (*) the size of the CIE is platform dependent.
+ //
+
+ int eh_frame_hdr_version = iterator.GetNextByte();
+ EXPECT_EQ(EhFrameConstants::kEhFrameHdrVersion, eh_frame_hdr_version);
+
+ // .eh_frame pointer encoding specifier.
+ EXPECT_EQ(EhFrameConstants::kSData4 | EhFrameConstants::kPcRel,
+ iterator.GetNextByte());
+
+ // Lookup table size encoding specifier.
+ EXPECT_EQ(EhFrameConstants::kUData4, iterator.GetNextByte());
+
+ // Lookup table pointers encoding specifier.
+ EXPECT_EQ(EhFrameConstants::kSData4 | EhFrameConstants::kDataRel,
+ iterator.GetNextByte());
+
+ // A -> D
+ int offset_to_eh_frame = iterator.GetNextUInt32();
+ EXPECT_EQ(-(EhFrameConstants::kFdeVersionSize +
+ EhFrameConstants::kFdeEncodingSpecifiersSize + eh_frame_size),
+ offset_to_eh_frame);
+
+ int lut_entries = iterator.GetNextUInt32();
+ EXPECT_EQ(1, lut_entries);
+
+ // B -> E
+ int offset_to_procedure = iterator.GetNextUInt32();
+ EXPECT_EQ(-(eh_frame_size + kPaddingSize + kCodeSize), offset_to_procedure);
+
+ // B -> C
+ int offset_to_fde = iterator.GetNextUInt32();
+ EXPECT_EQ(-(fde_size + EhFrameConstants::kEhFrameTerminatorSize),
+ offset_to_fde);
+}
+
+#endif
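
The PcOffsetEncoding* tests fix how code-offset deltas (in code-alignment-factor units) are written: each AdvanceLocation is encoded relative to the previously emitted location, packed into the DW_CFA_advance_loc opcode byte itself when the delta fits in 6 bits, and otherwise spilled into the 1-, 2-, or 4-byte advance_loc forms. A hedged sketch of that size selection (opcode values follow the DWARF spec; the output buffer is a stand-in, not V8's writer):

#include <cstdint>
#include <vector>

// delta is relative to the previously written location, which is why the
// *Delta tests above expect differences rather than absolute offsets.
void EmitAdvanceLoc(std::vector<uint8_t>* out, uint32_t delta) {
  if (delta < (1u << 6)) {
    out->push_back(static_cast<uint8_t>((1u << 6) | delta));  // DW_CFA_advance_loc
  } else if (delta <= 0xff) {
    out->push_back(0x02);                                     // DW_CFA_advance_loc1
    out->push_back(static_cast<uint8_t>(delta));
  } else if (delta <= 0xffff) {
    out->push_back(0x03);                                     // DW_CFA_advance_loc2
    out->push_back(static_cast<uint8_t>(delta));              // little-endian
    out->push_back(static_cast<uint8_t>(delta >> 8));
  } else {
    out->push_back(0x04);                                     // DW_CFA_advance_loc4
    for (int i = 0; i < 4; ++i) {
      out->push_back(static_cast<uint8_t>(delta >> (8 * i)));
    }
  }
}
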
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 2bf4d037d3..84e4d973e2 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -5,12 +5,17 @@
#include <cmath>
#include <limits>
+#include "src/globals.h"
#include "src/heap/gc-tracer.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
+typedef TestWithContext GCTracerTest;
+
TEST(GCTracer, AverageSpeed) {
RingBuffer<BytesAndDuration> buffer;
EXPECT_EQ(100 / 2,
@@ -45,5 +50,190 @@ TEST(GCTracer, AverageSpeed) {
GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
}
+namespace {
+
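+// Helper that samples the allocation counters and immediately commits the
+// sample, so each call adds one data point at time_ms.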
+void SampleAndAddAllocation(v8::internal::GCTracer* tracer, double time_ms,
+ size_t new_space_counter_bytes,
+ size_t old_generation_counter_bytes) {
+ tracer->SampleAllocation(time_ms, new_space_counter_bytes,
+ old_generation_counter_bytes);
+ tracer->AddAllocation(time_ms);
+}
+
+} // namespace
+
+TEST_F(GCTracerTest, AllocationThroughput) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+  // First sample creates a baseline but is not part of the recorded samples.
+ tracer->SampleAllocation(time1, counter1, counter1);
+  SampleAndAddAllocation(tracer, time1, counter1, counter1);
+ int time2 = 200;
+ size_t counter2 = 2000;
+  SampleAndAddAllocation(tracer, time2, counter2, counter2);
+ // Will only consider the current sample.
+ size_t throughput = static_cast<size_t>(
+ tracer->AllocationThroughputInBytesPerMillisecond(100));
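+  // Both the new space and the old generation counters advanced by
+  // (counter2 - counter1), so the combined rate is twice the per-space rate.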
+ EXPECT_EQ(2 * (counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+  SampleAndAddAllocation(tracer, time3, counter3, counter3);
+ // Considers last 2 samples.
+ throughput = tracer->AllocationThroughputInBytesPerMillisecond(801);
+ EXPECT_EQ(2 * (counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, NewSpaceAllocationThroughput) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+  SampleAndAddAllocation(tracer, time1, counter1, 0);
+ int time2 = 200;
+ size_t counter2 = 2000;
+  SampleAndAddAllocation(tracer, time2, counter2, 0);
+ size_t throughput =
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
+ EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+  SampleAndAddAllocation(tracer, time3, counter3, 0);
+ throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
+ EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+  // First sample creates a baseline but is not part of the recorded samples.
+  SampleAndAddAllocation(tracer, time1, counter1, 0);
+ int time2 = 200;
+ size_t counter2 = 2000;
+  SampleAndAddAllocation(tracer, time2, counter2, 0);
+ // Will only consider the current sample.
+ size_t throughput =
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
+ EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+  SampleAndAddAllocation(tracer, time3, counter3, 0);
+ // Considers last 2 samples.
+ throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(801);
+ EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+  // First sample creates a baseline but is not part of the recorded samples.
+  SampleAndAddAllocation(tracer, time1, 0, counter1);
+ int time2 = 200;
+ size_t counter2 = 2000;
+  SampleAndAddAllocation(tracer, time2, 0, counter2);
+ // Will only consider the current sample.
+ size_t throughput = static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
+ EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+  SampleAndAddAllocation(tracer, time3, 0, counter3);
+ // Considers last 2 samples.
+ throughput = static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(801));
+ EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, RegularScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ EXPECT_DOUBLE_EQ(0.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
+ // Sample not added because it's not within a started tracer.
+ tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(100.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
+}
+
+TEST_F(GCTracerTest, IncrementalScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ EXPECT_DOUBLE_EQ(
+ 0.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
+  // Sample is added because its ScopeId is listed as an incremental sample.
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Switch to incremental MC to enable writing back incremental scopes.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
+ tracer->Stop(MARK_COMPACTOR);
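+  // Both 100ms samples count: the one buffered before Start() and the one
+  // recorded while the incremental mark-compactor was running.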
+ EXPECT_DOUBLE_EQ(
+ 200.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
+}
+
+TEST_F(GCTracerTest, IncrementalMarkingDetails) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ // Round 1.
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 50);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Switch to incremental MC to enable writing back incremental scopes.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
+ tracer->Stop(MARK_COMPACTOR);
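+  // Round 1 recorded 2 steps: 50ms before Start() and 100ms during the GC.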
+ EXPECT_DOUBLE_EQ(
+ 100,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .longest_step);
+ EXPECT_EQ(
+ 2,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .steps);
+ EXPECT_DOUBLE_EQ(
+ 150,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .cumulative_duration);
+
+ // Round 2. Cumulative numbers should add up, others should be reset.
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 13);
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 15);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Switch to incremental MC to enable writing back incremental scopes.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 122);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(
+ 122,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .longest_step);
+ EXPECT_EQ(
+ 3,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .steps);
+ EXPECT_DOUBLE_EQ(
+ 300,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .cumulative_duration);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
new file mode 100644
index 0000000000..0015cce8d7
--- /dev/null
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -0,0 +1,160 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/globals.h"
+#include "src/heap/marking.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(Marking, MarkWhiteBlackWhite) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
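+  // calloc zero-initializes the bitmap storage, so all mark bits start white.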
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
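+  // The positions straddle a cell boundary so both bitmap cells are exercised.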
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, TransitionWhiteBlackWhite) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::WhiteToBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::BlackToWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, TransitionAnyToGrey) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::AnyToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::AnyToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, TransitionWhiteGreyBlackGrey) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::WhiteToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::GreyToBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::BlackToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, SetAndClearRange) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ for (int i = 0; i < 3; i++) {
+ bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
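+    // Bits [i, kBitsPerCell + i) cover the top (kBitsPerCell - i) bits of
+    // cell 0 and the low i bits of cell 1, as the checks below verify.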
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff << i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1 << i) - 1);
+ bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0);
+ }
+ free(bitmap);
+}
+
+TEST(Marking, ClearMultipleRanges) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
+ bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffff);
+ CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
+ bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
+ bitmap->ClearRange(Bitmap::kBitsPerCell,
+ Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
+ bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
+ Bitmap::kBitsPerCell * 2 + 16);
+ bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
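+  // After the clears, cell 0 keeps only its low half, cell 1 only its high
+  // half, and cell 2 keeps bits 0-7 and 16-23 (0x00ff00ff).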
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffff);
+ CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
+ CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
+ Bitmap::kBitsPerCell));
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000);
+ CHECK(
+ bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
+ 2 * Bitmap::kBitsPerCell));
+ CHECK(bitmap->AllBitsClearInRange(
+ Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ff);
+ CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
+ 2 * Bitmap::kBitsPerCell + 8));
+ CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
+ Bitmap::kBitsPerCell * 3));
+ free(bitmap);
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 26a26f0258..cfb1f1f9d2 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -142,23 +142,29 @@ TEST(SlotSet, RemoveRange) {
TEST(TypedSlotSet, Iterate) {
TypedSlotSet set(0);
const int kDelta = 10000001;
+ const int kHostDelta = 50001;
int added = 0;
- for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
+ uint32_t j = 0;
+ for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset;
+ i += kDelta, j += kHostDelta) {
SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
- set.Insert(type, i);
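+    // Slots now record a host address (j) alongside the slot address (i).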
+ set.Insert(type, j, i);
++added;
}
int iterated = 0;
- set.Iterate([&iterated, kDelta](SlotType type, Address addr) {
+ set.Iterate([&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
+ Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ uint32_t j = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
EXPECT_EQ(0, i % kDelta);
+ EXPECT_EQ(0, j % kHostDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
});
EXPECT_EQ(added, iterated);
iterated = 0;
- set.Iterate([&iterated](SlotType type, Address addr) {
+ set.Iterate([&iterated](SlotType type, Address host_addr, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(0, i % 2);
++iterated;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 255d836af5..fffc97f54d 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "test/unittests/test-utils.h"
@@ -21,12 +22,18 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 1, 131);
+ Factory* factory = isolate()->factory();
CHECK_EQ(builder.locals_count(), 131);
CHECK_EQ(builder.context_count(), 1);
CHECK_EQ(builder.fixed_register_count(), 132);
+ Register reg(0);
+ Register other(reg.index() + 1);
+ Register wide(128);
+
// Emit argument creation operations.
builder.CreateArguments(CreateArgumentsType::kMappedArguments)
.CreateArguments(CreateArgumentsType::kUnmappedArguments)
@@ -34,31 +41,41 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit constant loads.
builder.LoadLiteral(Smi::FromInt(0))
+ .StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(8))
+ .CompareOperation(Token::Value::NE, reg) // Prevent peephole optimization
+ // LdaSmi, Star -> LdrSmi.
+ .StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(10000000))
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(factory->NewStringFromStaticChars("A constant"))
+ .StoreAccumulatorInRegister(reg)
.LoadUndefined()
+ .Debugger() // Prevent peephole optimization LdaNull, Star -> LdrNull.
.LoadNull()
+ .StoreAccumulatorInRegister(reg)
.LoadTheHole()
+ .StoreAccumulatorInRegister(reg)
.LoadTrue()
- .LoadFalse();
-
- Register reg(0);
- Register other(reg.index() + 1);
- Register wide(128);
+ .StoreAccumulatorInRegister(reg)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(wide);
- builder.LoadAccumulatorWithRegister(reg)
- .LoadNull()
- .StoreAccumulatorInRegister(reg);
+ // Emit Ldar and Star taking care to foil the register optimizer.
+ builder.StackCheck(0)
+ .LoadAccumulatorWithRegister(other)
+ .BinaryOperation(Token::ADD, reg, 1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadNull();
// Emit register-register transfer.
builder.MoveRegister(reg, other);
builder.MoveRegister(reg, wide);
// Emit global load / store operations.
- Factory* factory = isolate()->factory();
Handle<String> name = factory->NewStringFromStaticChars("var_name");
- builder.LoadGlobal(name, 1, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(1, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(1, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1, LanguageMode::SLOPPY)
.StoreGlobal(name, 1, LanguageMode::STRICT);
@@ -83,21 +100,24 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(name, LanguageMode::STRICT);
// Emit closure operations.
- Handle<SharedFunctionInfo> shared_info = factory->NewSharedFunctionInfo(
- factory->NewStringFromStaticChars("function_a"), MaybeHandle<Code>(),
- false);
- builder.CreateClosure(shared_info, NOT_TENURED);
+ builder.CreateClosure(0, NOT_TENURED);
+
+ // Emit create context operation.
+ builder.CreateBlockContext(factory->NewScopeInfo(1));
+ builder.CreateCatchContext(reg, name);
+ builder.CreateFunctionContext(1);
+ builder.CreateWithContext(reg);
// Emit literal creation operations.
builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("a"), 0, 0)
.CreateArrayLiteral(factory->NewFixedArray(1), 0, 0)
- .CreateObjectLiteral(factory->NewFixedArray(1), 0, 0);
+ .CreateObjectLiteral(factory->NewFixedArray(1), 0, 0, reg);
// Call operations.
- builder.Call(reg, other, 1, 0)
- .Call(reg, wide, 1, 0)
- .TailCall(reg, other, 1, 0)
- .TailCall(reg, wide, 1, 0)
+ builder.Call(reg, other, 0, 1)
+ .Call(reg, wide, 0, 1)
+ .TailCall(reg, other, 0, 1)
+ .TailCall(reg, wide, 0, 1)
.CallRuntime(Runtime::kIsArray, reg, 1)
.CallRuntime(Runtime::kIsArray, wide, 1)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg, 1, other)
@@ -106,27 +126,45 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, wide, 1);
// Emit binary operator invocations.
- builder.BinaryOperation(Token::Value::ADD, reg)
- .BinaryOperation(Token::Value::SUB, reg)
- .BinaryOperation(Token::Value::MUL, reg)
- .BinaryOperation(Token::Value::DIV, reg)
- .BinaryOperation(Token::Value::MOD, reg);
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
+ .BinaryOperation(Token::Value::SUB, reg, 2)
+ .BinaryOperation(Token::Value::MUL, reg, 3)
+ .BinaryOperation(Token::Value::DIV, reg, 4)
+ .BinaryOperation(Token::Value::MOD, reg, 5);
// Emit bitwise operator invocations
- builder.BinaryOperation(Token::Value::BIT_OR, reg)
- .BinaryOperation(Token::Value::BIT_XOR, reg)
- .BinaryOperation(Token::Value::BIT_AND, reg);
+ builder.BinaryOperation(Token::Value::BIT_OR, reg, 6)
+ .BinaryOperation(Token::Value::BIT_XOR, reg, 7)
+ .BinaryOperation(Token::Value::BIT_AND, reg, 8);
// Emit shift operator invocations
- builder.BinaryOperation(Token::Value::SHL, reg)
- .BinaryOperation(Token::Value::SAR, reg)
- .BinaryOperation(Token::Value::SHR, reg);
+ builder.BinaryOperation(Token::Value::SHL, reg, 9)
+ .BinaryOperation(Token::Value::SAR, reg, 10)
+ .BinaryOperation(Token::Value::SHR, reg, 11);
+
+ // Emit peephole optimizations of LdaSmi followed by binary operation.
+ builder.LoadLiteral(Smi::FromInt(1))
+ .BinaryOperation(Token::Value::ADD, reg, 1)
+ .LoadLiteral(Smi::FromInt(2))
+ .BinaryOperation(Token::Value::SUB, reg, 2)
+ .LoadLiteral(Smi::FromInt(3))
+ .BinaryOperation(Token::Value::BIT_AND, reg, 3)
+ .LoadLiteral(Smi::FromInt(4))
+ .BinaryOperation(Token::Value::BIT_OR, reg, 4)
+ .LoadLiteral(Smi::FromInt(5))
+ .BinaryOperation(Token::Value::SHL, reg, 5)
+ .LoadLiteral(Smi::FromInt(6))
+ .BinaryOperation(Token::Value::SAR, reg, 6);
   // Emit count operator invocations
- builder.CountOperation(Token::Value::ADD).CountOperation(Token::Value::SUB);
+ builder.CountOperation(Token::Value::ADD, 1)
+ .CountOperation(Token::Value::SUB, 1);
// Emit unary operator invocations.
- builder.LogicalNot().TypeOf();
+ builder
+ .LogicalNot() // ToBooleanLogicalNot
+ .LogicalNot() // non-ToBoolean LogicalNot
+ .TypeOf();
// Emit delete
builder.Delete(reg, LanguageMode::SLOPPY).Delete(reg, LanguageMode::STRICT);
@@ -147,33 +185,41 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CompareOperation(Token::Value::IN, reg);
// Emit cast operator invocations.
- builder.CastAccumulatorToNumber()
- .CastAccumulatorToJSObject()
- .CastAccumulatorToName();
+ builder.CastAccumulatorToNumber(reg)
+ .CastAccumulatorToJSObject(reg)
+ .CastAccumulatorToName(reg);
// Emit control flow. Return must be the last instruction.
BytecodeLabel start;
builder.Bind(&start);
- // Short jumps with Imm8 operands
- builder.Jump(&start)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start)
- .JumpIfNotHole(&start);
+ {
+ // Short jumps with Imm8 operands
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ }
// Longer jumps with constant operands
BytecodeLabel end[8];
- builder.Jump(&end[0])
- .LoadTrue()
- .JumpIfTrue(&end[1])
- .LoadTrue()
- .JumpIfFalse(&end[2])
- .LoadLiteral(Smi::FromInt(0))
- .JumpIfTrue(&end[3])
- .LoadLiteral(Smi::FromInt(0))
- .JumpIfFalse(&end[4])
- .JumpIfNull(&end[5])
- .JumpIfUndefined(&end[6])
- .JumpIfNotHole(&end[7]);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&end[0])
+ .Bind(&after_jump)
+ .LoadTrue()
+ .JumpIfTrue(&end[1])
+ .LoadTrue()
+ .JumpIfFalse(&end[2])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfTrue(&end[3])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfFalse(&end[4])
+ .JumpIfNull(&end[5])
+ .JumpIfUndefined(&end[6])
+ .JumpIfNotHole(&end[7]);
+ }
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
@@ -183,45 +229,54 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.JumpIfFalse(&start);
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&start);
// Insert dummy ops to force longer jumps
for (int i = 0; i < 128; i++) {
builder.LoadTrue();
}
// Longer jumps requiring Constant operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
- &start);
- // Perform an operation that returns boolean value to
- // generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&start);
- // Perform an operation that returns a non-boolean operation to
- // generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
- .JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
- .JumpIfFalse(&start);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ // Perform an operation that returns boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg)
+ .JumpIfFalse(&start);
+ // Perform an operation that returns a non-boolean operation to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
+ .JumpIfFalse(&start);
+ }
// Emit stack check bytecode.
- builder.StackCheck();
+ builder.StackCheck(0);
+
+ // Emit an OSR poll bytecode.
+ builder.OsrPoll(1);
   // Emit throw and re-throw in its own basic block so that the rest of the
// code isn't omitted due to being dead.
BytecodeLabel after_throw;
- builder.Jump(&after_throw).Throw().Bind(&after_throw);
+ builder.Throw().Bind(&after_throw);
BytecodeLabel after_rethrow;
- builder.Jump(&after_rethrow).ReThrow().Bind(&after_rethrow);
+ builder.ReThrow().Bind(&after_rethrow);
- builder.ForInPrepare(reg)
+ builder.ForInPrepare(reg, reg)
.ForInDone(reg, reg)
.ForInNext(reg, reg, reg, 1)
.ForInStep(reg);
- builder.ForInPrepare(wide)
+ builder.ForInPrepare(reg, wide)
.ForInDone(reg, other)
.ForInNext(wide, wide, wide, 1024)
.ForInStep(reg);
@@ -235,14 +290,14 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
Handle<String> wide_name = factory->NewStringFromStaticChars("var_wide_name");
// Emit wide global load / store operations.
- builder.LoadGlobal(name, 1024, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(1024, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(1024, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(1024, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1024, LanguageMode::SLOPPY)
.StoreGlobal(wide_name, 1, LanguageMode::STRICT);
// Emit extra wide global load.
- builder.LoadGlobal(name, 1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
+ builder.LoadGlobal(1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
// Emit wide load / store property operations.
builder.LoadNamedProperty(reg, wide_name, 0)
@@ -261,34 +316,56 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
.StoreLookupSlot(wide_name, LanguageMode::STRICT);
+ // Emit loads which will be transformed to Ldr equivalents by the peephole
+ // optimizer.
+ builder.LoadNamedProperty(reg, name, 0)
+ .StoreAccumulatorInRegister(reg)
+ .LoadKeyedProperty(reg, 0)
+ .StoreAccumulatorInRegister(reg)
+ .LoadContextSlot(reg, 1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadGlobal(0, TypeofMode::NOT_INSIDE_TYPEOF)
+ .StoreAccumulatorInRegister(reg)
+ .LoadUndefined()
+ .StoreAccumulatorInRegister(reg);
+
// CreateClosureWide
- Handle<SharedFunctionInfo> shared_info2 = factory->NewSharedFunctionInfo(
- factory->NewStringFromStaticChars("function_b"), MaybeHandle<Code>(),
- false);
- builder.CreateClosure(shared_info2, NOT_TENURED);
+ builder.CreateClosure(1000, NOT_TENURED);
// Emit wide variant of literal creation operations.
builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("wide_literal"),
0, 0)
.CreateArrayLiteral(factory->NewFixedArray(2), 0, 0)
- .CreateObjectLiteral(factory->NewFixedArray(2), 0, 0);
+ .CreateObjectLiteral(factory->NewFixedArray(2), 0, 0, reg);
// Longer jumps requiring ConstantWide operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
- &start);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ }
+
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
builder.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&start)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&start);
+
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&start);
+ // Emit generator operations
+ builder.SuspendGenerator(reg)
+ .ResumeGenerator(reg);
+
// Intrinsics handled by the interpreter.
builder.CallRuntime(Runtime::kInlineIsArray, reg, 1)
.CallRuntime(Runtime::kInlineIsArray, wide, 1);
@@ -300,7 +377,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.Return();
// Generate BytecodeArray.
- Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
builder.fixed_and_temporary_register_count() * kPointerSize);
@@ -327,6 +404,30 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Insert entry for illegal bytecode as this is never willingly emitted.
scorecard[Bytecodes::ToByte(Bytecode::kIllegal)] = 1;
+ // Insert entry for nop bytecode as this often gets optimized out.
+ scorecard[Bytecodes::ToByte(Bytecode::kNop)] = 1;
+
+ if (!FLAG_ignition_peephole) {
+ // Insert entries for bytecodes only emitted by peephole optimizer.
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrNamedProperty)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrKeyedProperty)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrGlobal)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrContextSlot)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrUndefined)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLogicalNot)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJump)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrue)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalse)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrueConstant)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalseConstant)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kAddSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kSubSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kBitwiseAndSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kBitwiseOrSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kShiftLeftSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kShiftRightSmi)] = 1;
+ }
+
// Check return occurs at the end and only once in the BytecodeArray.
CHECK_EQ(final_bytecode, Bytecode::kReturn);
CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
@@ -342,18 +443,30 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
+ CanonicalHandleScope canonical(isolate());
for (int locals = 0; locals < 5; locals++) {
for (int contexts = 0; contexts < 4; contexts++) {
for (int temps = 0; temps < 3; temps++) {
BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
BytecodeRegisterAllocator temporaries(
zone(), builder.temporary_register_allocator());
+ for (int i = 0; i < locals + contexts; i++) {
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.StoreAccumulatorInRegister(Register(i));
+ }
for (int i = 0; i < temps; i++) {
+ builder.LoadLiteral(Smi::FromInt(0));
builder.StoreAccumulatorInRegister(temporaries.NewRegister());
}
+ if (temps > 0) {
+          // Ensure temporaries are used so that they are not optimized away
+          // by the register optimizer.
+ builder.New(Register(locals + contexts), Register(locals + contexts),
+ static_cast<size_t>(temps));
+ }
builder.Return();
- Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
int total_registers = locals + contexts + temps;
CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
}
@@ -363,22 +476,22 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
+ CanonicalHandleScope canonical(isolate());
int index = 1;
- int32_t operand = -index;
Register the_register(index);
CHECK_EQ(the_register.index(), index);
int actual_operand = the_register.ToOperand();
- CHECK_EQ(actual_operand, operand);
-
int actual_index = Register::FromOperand(actual_operand).index();
CHECK_EQ(actual_index, index);
}
TEST_F(BytecodeArrayBuilderTest, Parameters) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 0);
+
Register param0(builder.Parameter(0));
Register param9(builder.Parameter(9));
CHECK_EQ(param9.index() - param0.index(), 9);
@@ -386,6 +499,7 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
TEST_F(BytecodeArrayBuilderTest, RegisterType) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 3);
BytecodeRegisterAllocator register_allocator(
zone(), builder.temporary_register_allocator());
@@ -409,7 +523,9 @@ TEST_F(BytecodeArrayBuilderTest, RegisterType) {
TEST_F(BytecodeArrayBuilderTest, Constants) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(3.14);
Handle<HeapObject> heap_num_2 = factory->NewHeapNumber(5.2);
@@ -423,28 +539,37 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
.LoadLiteral(heap_num_2_copy)
.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
// Should only have one entry for each identical constant.
CHECK_EQ(array->constant_pool()->length(), 3);
}
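+// When the peephole optimizer is enabled, a ToBoolean jump that follows a
+// compare is rewritten to its plain variant; this helper returns the
+// bytecode the checks below should expect.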
+static Bytecode PeepholeToBoolean(Bytecode jump_bytecode) {
+ return FLAG_ignition_peephole
+ ? Bytecodes::GetJumpWithoutToBoolean(jump_bytecode)
+ : jump_bytecode;
+}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
+ CanonicalHandleScope canonical(isolate());
static const int kFarJumpDistance = 256;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
BytecodeLabel near0, near1, near2, near3, near4;
+ BytecodeLabel after_jump0, after_jump1;
builder.Jump(&near0)
+ .Bind(&after_jump0)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&near1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&near2)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&near3)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&near4)
.Bind(&near0)
.Bind(&near1)
@@ -452,47 +577,50 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
.Bind(&near3)
.Bind(&near4)
.Jump(&far0)
+ .Bind(&after_jump1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&far1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&far2)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 3)
.JumpIfTrue(&far3)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 4)
.JumpIfFalse(&far4);
- for (int i = 0; i < kFarJumpDistance - 18; i++) {
- builder.LoadUndefined();
+ for (int i = 0; i < kFarJumpDistance - 20; i++) {
+ builder.Debugger();
}
builder.Bind(&far0).Bind(&far1).Bind(&far2).Bind(&far3).Bind(&far4);
builder.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
- DCHECK_EQ(array->length(), 36 + kFarJumpDistance - 18 + 1);
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
+ DCHECK_EQ(array->length(), 40 + kFarJumpDistance - 20 + 1);
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetImmediateOperand(0), 18);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 20);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
- CHECK_EQ(iterator.GetImmediateOperand(0), 14);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
+ CHECK_EQ(iterator.GetImmediateOperand(0), 16);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
- CHECK_EQ(iterator.GetImmediateOperand(0), 10);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
+ CHECK_EQ(iterator.GetImmediateOperand(0), 12);
iterator.Advance();
// Ignore add operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
- CHECK_EQ(iterator.GetImmediateOperand(0), 6);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 7);
iterator.Advance();
// Ignore add operation.
@@ -502,7 +630,6 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.GetImmediateOperand(0), 2);
iterator.Advance();
-
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance));
@@ -511,7 +638,8 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrueConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 4));
iterator.Advance();
@@ -519,7 +647,8 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalseConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 8));
iterator.Advance();
@@ -529,7 +658,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 12));
+ Smi::FromInt(kFarJumpDistance - 13));
iterator.Advance();
// Ignore add operation.
@@ -538,13 +667,15 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
Bytecode::kJumpIfToBooleanFalseConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 16));
+ Smi::FromInt(kFarJumpDistance - 18));
iterator.Advance();
}
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+
Register reg(0);
BytecodeLabel label0, label1, label2, label3, label4;
@@ -557,41 +688,46 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&label2)
.Bind(&label3)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&label3)
.Bind(&label4)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&label4);
- for (int i = 0; i < 63; i++) {
- builder.Jump(&label4);
+ for (int i = 0; i < 62; i++) {
+ BytecodeLabel after_jump;
+ builder.Jump(&label4).Bind(&after_jump);
}
// Add padding to force wide backwards jumps.
for (int i = 0; i < 256; i++) {
- builder.LoadTrue();
+ builder.Debugger();
}
- builder.BinaryOperation(Token::Value::ADD, reg).JumpIfFalse(&label4);
- builder.BinaryOperation(Token::Value::ADD, reg).JumpIfTrue(&label3);
+ builder.BinaryOperation(Token::Value::ADD, reg, 1).JumpIfFalse(&label4);
+ builder.BinaryOperation(Token::Value::ADD, reg, 2).JumpIfTrue(&label3);
builder.CompareOperation(Token::Value::EQ, reg).JumpIfFalse(&label2);
builder.CompareOperation(Token::Value::EQ, reg).JumpIfTrue(&label1);
builder.Jump(&label0);
+ BytecodeLabel end;
+ builder.Bind(&end);
builder.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetImmediateOperand(0), 0);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
@@ -599,23 +735,24 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -3);
iterator.Advance();
// Ignore binary operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -3);
iterator.Advance();
- for (int i = 0; i < 63; i++) {
+ for (int i = 0; i < 62; i++) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 4);
+ // offset of 5 (3 for binary operation and 2 for jump)
+ CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 5);
iterator.Advance();
}
// Check padding to force wide backwards jumps.
for (int i = 0; i < 256; i++) {
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaTrue);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
iterator.Advance();
}
// Ignore binary operation.
@@ -628,23 +765,25 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -399);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -401);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -409);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -411);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -419);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -421);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -425);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -427);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
iterator.Advance();
@@ -653,15 +792,22 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
// Labels can only have 1 forward reference, but
   // can be referred to multiple times once bound.
- BytecodeLabel label;
-
- builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label).Return();
+ BytecodeLabel label, after_jump0, after_jump1;
+
+ builder.Jump(&label)
+ .Bind(&label)
+ .Jump(&label)
+ .Bind(&after_jump0)
+ .Jump(&label)
+ .Bind(&after_jump1)
+ .Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetImmediateOperand(0), 2);
@@ -679,16 +825,22 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
+ CanonicalHandleScope canonical(isolate());
static const int kRepeats = 3;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
for (int i = 0; i < kRepeats; i++) {
- BytecodeLabel label;
- builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label);
+ BytecodeLabel label, after_jump0, after_jump1;
+ builder.Jump(&label)
+ .Bind(&label)
+ .Jump(&label)
+ .Bind(&after_jump0)
+ .Jump(&label)
+ .Bind(&after_jump1);
}
builder.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
for (int i = 0; i < kRepeats; i++) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
@@ -706,85 +858,6 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
CHECK(iterator.done());
}
-TEST_F(BytecodeArrayBuilderTest, OperandScales) {
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kByte),
- OperandScale::kSingle);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kShort),
- OperandScale::kDouble);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kQuad),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kShort, OperandSize::kShort,
- OperandSize::kShort),
- OperandScale::kDouble);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kQuad, OperandSize::kShort, OperandSize::kShort,
- OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kQuad, OperandSize::kShort,
- OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kShort, OperandSize::kQuad,
- OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kShort, OperandSize::kShort,
- OperandSize::kQuad),
- OperandScale::kQuadruple);
-}
-
-TEST_F(BytecodeArrayBuilderTest, SizesForSignOperands) {
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(0) == OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt8) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt8) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt8 + 1) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt8 - 1) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt16) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt16) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt16 + 1) ==
- OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt16 - 1) ==
- OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt) ==
- OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt) ==
- OperandSize::kQuad);
-}
-
-TEST_F(BytecodeArrayBuilderTest, SizesForUnsignOperands) {
- // int overloads
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(0) == OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt8) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt8 + 1) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt16) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt16 + 1) ==
- OperandSize::kQuad);
- // size_t overloads
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(static_cast<size_t>(0)) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt8)) == OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt8 + 1)) == OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt16)) == OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt16 + 1)) == OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt32)) == OperandSize::kQuad);
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 43c6caa8cf..b844180dc0 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -37,22 +37,32 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
int feedback_slot = 97;
builder.LoadLiteral(heap_num_0)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
+ .StackCheck(0)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
+ .StackCheck(1)
+ .StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
+ .BinaryOperation(Token::Value::ADD, reg_0, 2)
+ .StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
+ .BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, 1, reg_0)
- .ForInPrepare(reg_0)
+ .ForInPrepare(reg_0, reg_0)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
- BytecodeArrayIterator iterator(builder.ToBytecodeArray());
+ BytecodeArrayIterator iterator(builder.ToBytecodeArray(isolate()));
const int kPrefixByteSize = 1;
int offset = 0;
@@ -64,6 +74,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -72,6 +91,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -79,6 +107,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -87,6 +124,23 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
@@ -96,6 +150,23 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
kPrefixByteSize;
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdar);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -104,14 +175,41 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadIC);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
CHECK_EQ(iterator.GetIndexOperand(1), name_index);
CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kLoadIC, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
@@ -141,7 +239,9 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 3);
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(1), 3);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
iterator.Advance();
@@ -149,8 +249,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetRuntimeIdOperand(0)),
- Runtime::kLoadIC_Miss);
+ CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
CHECK(!iterator.done());
@@ -167,8 +266,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- CHECK_EQ(iterator.current_bytecode_size(), 10);
- CHECK_EQ(iterator.GetIndexOperand(1), 0x10000000);
+ CHECK_EQ(iterator.current_bytecode_size(), 6);
+ CHECK_EQ(iterator.GetIndexOperand(0), 0x10000000);
offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
kPrefixByteSize;
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
new file mode 100644
index 0000000000..9681612ac4
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -0,0 +1,254 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/factory.h"
+#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/isolate.h"
+#include "src/source-position-table.h"
+#include "src/utils.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
+ public:
+ BytecodeArrayWriterUnittest()
+ : constant_array_builder_(zone(), isolate()->factory()->the_hole_value()),
+ bytecode_array_writer_(
+ zone(), &constant_array_builder_,
+ SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS) {}
+ ~BytecodeArrayWriterUnittest() override {}
+
+ void Write(BytecodeNode* node, const BytecodeSourceInfo& info);
+ void Write(Bytecode bytecode,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+
+ void WriteJump(Bytecode bytecode, BytecodeLabel* label,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+
+ BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
+ ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
+ SourcePositionTableBuilder* source_position_table_builder() {
+ return writer()->source_position_table_builder();
+ }
+ int max_register_count() { return writer()->max_register_count(); }
+
+ private:
+ ConstantArrayBuilder constant_array_builder_;
+ BytecodeArrayWriter bytecode_array_writer_;
+};
+
+void BytecodeArrayWriterUnittest::Write(BytecodeNode* node,
+ const BytecodeSourceInfo& info) {
+ if (info.is_valid()) {
+ node->source_info().Clone(info);
+ }
+ writer()->Write(node);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0, operand1);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0, operand1, operand2);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
+ BytecodeLabel* label,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, 0);
+ if (info.is_valid()) {
+ node.source_info().Clone(info);
+ }
+ writer()->WriteJump(&node, label);
+}
+
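+// Writes a short straight-line sequence and checks the emitted bytes,
+// the tracked register count, and the recorded source positions.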
+TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
+ CHECK_EQ(bytecodes()->size(), 0);
+
+ Write(Bytecode::kStackCheck, {10, false});
+ CHECK_EQ(bytecodes()->size(), 1);
+ CHECK_EQ(max_register_count(), 0);
+
+ Write(Bytecode::kLdaSmi, 127, {55, true});
+ CHECK_EQ(bytecodes()->size(), 3);
+ CHECK_EQ(max_register_count(), 0);
+
+ Write(Bytecode::kLdar, Register(200).ToOperand());
+ CHECK_EQ(bytecodes()->size(), 7);
+ CHECK_EQ(max_register_count(), 201);
+
+ Write(Bytecode::kReturn, {70, true});
+ CHECK_EQ(bytecodes()->size(), 8);
+ CHECK_EQ(max_register_count(), 201);
+
+ static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(127), B(Wide),
+ B(Ldar), R16(200), B(Return)};
+ CHECK_EQ(bytecodes()->size(), arraysize(bytes));
+ for (size_t i = 0; i < arraysize(bytes); ++i) {
+ CHECK_EQ(bytecodes()->at(i), bytes[i]);
+ }
+
+ Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
+ isolate(), 0, 0, factory()->empty_fixed_array());
+ CHECK_EQ(bytecodes()->size(), arraysize(bytes));
+
+ PositionTableEntry expected_positions[] = {
+ {0, 10, false}, {1, 55, true}, {7, 70, true}};
+ SourcePositionTableIterator source_iterator(
+ bytecode_array->source_position_table());
+ for (size_t i = 0; i < arraysize(expected_positions); ++i) {
+ const PositionTableEntry& expected = expected_positions[i];
+ CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
+ CHECK_EQ(source_iterator.source_position(), expected.source_position);
+ CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
+ source_iterator.Advance();
+ }
+ CHECK(source_iterator.done());
+}
+
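+// Exercises labels and jumps in a for-in style loop and compares the
+// output against a hand-written byte listing and source position table.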
+TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
+ static const uint8_t expected_bytes[] = {
+ // clang-format off
+ /* 0 30 E> */ B(StackCheck),
+ /* 1 42 S> */ B(LdaConstant), U8(0),
+ /* 3 42 E> */ B(Star), R8(1),
+ /* 5 68 S> */ B(JumpIfUndefined), U8(38),
+ /* 7 */ B(JumpIfNull), U8(36),
+ /* 9 */ B(ToObject), R8(3),
+ /* 11 */ B(ForInPrepare), R8(3), R8(4),
+ /* 14 */ B(LdaZero),
+ /* 15 */ B(Star), R8(7),
+ /* 17 63 S> */ B(ForInDone), R8(7), R8(6),
+ /* 20 */ B(JumpIfTrue), U8(23),
+ /* 22 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
+ /* 27 */ B(JumpIfUndefined), U8(10),
+ /* 29 */ B(Star), R8(0),
+ /* 31 54 E> */ B(StackCheck),
+ /* 32 */ B(Ldar), R8(0),
+ /* 34 */ B(Star), R8(2),
+ /* 36 85 S> */ B(Return),
+ /* 37 */ B(ForInStep), R8(7),
+ /* 39 */ B(Star), R8(7),
+ /* 41 */ B(Jump), U8(-24),
+ /* 43 */ B(LdaUndefined),
+ /* 44 85 S> */ B(Return),
+ // clang-format on
+ };
+
+ static const PositionTableEntry expected_positions[] = {
+ {0, 30, false}, {1, 42, true}, {3, 42, false}, {5, 68, true},
+ {17, 63, true}, {31, 54, false}, {36, 85, true}, {44, 85, true}};
+
+ BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
+
+#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
+ Write(Bytecode::kStackCheck, {30, false});
+ Write(Bytecode::kLdaConstant, U8(0), {42, true});
+ CHECK_EQ(max_register_count(), 0);
+ Write(Bytecode::kStar, R(1), {42, false});
+ CHECK_EQ(max_register_count(), 2);
+ WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
+ WriteJump(Bytecode::kJumpIfNull, &jump_end_2);
+ Write(Bytecode::kToObject, R(3));
+ CHECK_EQ(max_register_count(), 4);
+ Write(Bytecode::kForInPrepare, R(3), R(4));
+ CHECK_EQ(max_register_count(), 7);
+ Write(Bytecode::kLdaZero);
+ CHECK_EQ(max_register_count(), 7);
+ Write(Bytecode::kStar, R(7));
+ CHECK_EQ(max_register_count(), 8);
+ writer()->BindLabel(&back_jump);
+ Write(Bytecode::kForInDone, R(7), R(6), {63, true});
+ CHECK_EQ(max_register_count(), 8);
+ WriteJump(Bytecode::kJumpIfTrue, &jump_end_3);
+ Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
+ WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in);
+ Write(Bytecode::kStar, R(0));
+ Write(Bytecode::kStackCheck, {54, false});
+ Write(Bytecode::kLdar, R(0));
+ Write(Bytecode::kStar, R(2));
+ Write(Bytecode::kReturn, {85, true});
+ writer()->BindLabel(&jump_for_in);
+ Write(Bytecode::kForInStep, R(7));
+ Write(Bytecode::kStar, R(7));
+ WriteJump(Bytecode::kJump, &back_jump);
+ writer()->BindLabel(&jump_end_1);
+ writer()->BindLabel(&jump_end_2);
+ writer()->BindLabel(&jump_end_3);
+ Write(Bytecode::kLdaUndefined);
+ Write(Bytecode::kReturn, {85, true});
+ CHECK_EQ(max_register_count(), 8);
+#undef R
+
+ CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
+ for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
+ CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
+ static_cast<int>(expected_bytes[i]));
+ }
+
+ Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
+ isolate(), 0, 0, factory()->empty_fixed_array());
+ SourcePositionTableIterator source_iterator(
+ bytecode_array->source_position_table());
+ for (size_t i = 0; i < arraysize(expected_positions); ++i) {
+ const PositionTableEntry& expected = expected_positions[i];
+ CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
+ CHECK_EQ(source_iterator.source_position(), expected.source_position);
+ CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
+ source_iterator.Advance();
+ }
+ CHECK(source_iterator.done());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
new file mode 100644
index 0000000000..2b2171bc78
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
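+// Acts as the pipeline stage downstream of the dead-code optimizer,
+// recording each node passed through so tests can check which bytecodes
+// survive elimination.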
+class BytecodeDeadCodeOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodeDeadCodeOptimizerTest() : dead_code_optimizer_(this) {}
+ ~BytecodeDeadCodeOptimizerTest() override {}
+
+ void Write(BytecodeNode* node) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ BytecodeDeadCodeOptimizer* optimizer() { return &dead_code_optimizer_; }
+
+ int write_count() const { return write_count_; }
+ const BytecodeNode& last_written() const { return last_written_; }
+
+ private:
+ BytecodeDeadCodeOptimizer dead_code_optimizer_;
+
+ int write_count_ = 0;
+ BytecodeNode last_written_;
+};
+
+TEST_F(BytecodeDeadCodeOptimizerTest, LiveCodeKept) {
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReturnEliminated) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterThrowEliminated) {
+ BytecodeNode thrw(Bytecode::kThrow);
+ optimizer()->Write(&thrw);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(thrw, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(thrw, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReThrowEliminated) {
+ BytecodeNode rethrow(Bytecode::kReThrow);
+ optimizer()->Write(&rethrow);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(rethrow, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(rethrow, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterJumpEliminated) {
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(jump, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeStillDeadAfterConditionalJump) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJumpIfTrue, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, CodeLiveAfterLabelBind) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeLabel target;
+ optimizer()->BindLabel(&target);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(add, last_written());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
new file mode 100644
index 0000000000..7d61f6a1b3
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -0,0 +1,87 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iomanip>
+#include <sstream>
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-decoder.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
+ struct BytecodesAndResult {
+ const uint8_t bytecode[32];
+ const size_t length;
+ int parameter_count;
+ const char* output;
+ };
+
+ const BytecodesAndResult cases[] = {
+ {{B(LdaSmi), U8(1)}, 2, 0, " LdaSmi [1]"},
+ {{B(Wide), B(LdaSmi), U16(1000)}, 4, 0, " LdaSmi.Wide [1000]"},
+ {{B(ExtraWide), B(LdaSmi), U32(100000)},
+ 6,
+ 0,
+ "LdaSmi.ExtraWide [100000]"},
+ {{B(LdaSmi), U8(-1)}, 2, 0, " LdaSmi [-1]"},
+ {{B(Wide), B(LdaSmi), U16(-1000)}, 4, 0, " LdaSmi.Wide [-1000]"},
+ {{B(ExtraWide), B(LdaSmi), U32(-100000)},
+ 6,
+ 0,
+ "LdaSmi.ExtraWide [-100000]"},
+ {{B(Star), R8(5)}, 2, 0, " Star r5"},
+ {{B(Wide), B(Star), R16(136)}, 4, 0, " Star.Wide r136"},
+ {{B(Wide), B(Call), R16(134), R16(135), U16(2), U16(177)},
+ 10,
+ 0,
+ "Call.Wide r134, r135, #2, [177]"},
+ {{B(Ldar),
+ static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
+ 2,
+ 3,
+ " Ldar a1"},
+ {{B(Wide), B(CreateObjectLiteral), U16(513), U16(1027), U8(165),
+ R16(137)},
+ 9,
+ 0,
+ "CreateObjectLiteral.Wide [513], [1027], #165, r137"},
+ {{B(ExtraWide), B(JumpIfNull), U32(123456789)},
+ 6,
+ 0,
+ "JumpIfNull.ExtraWide [123456789]"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ // Generate reference string by prepending formatted bytes.
+ std::stringstream expected_ss;
+ std::ios default_format(nullptr);
+ default_format.copyfmt(expected_ss);
+ // Match format of BytecodeDecoder::Decode() for byte representations.
+ expected_ss.fill('0');
+ expected_ss.flags(std::ios::right | std::ios::hex);
+ for (size_t b = 0; b < cases[i].length; b++) {
+ expected_ss << std::setw(2) << static_cast<uint32_t>(cases[i].bytecode[b])
+ << ' ';
+ }
+ expected_ss.copyfmt(default_format);
+ expected_ss << cases[i].output;
+
+ // Generate decoded byte output.
+ std::stringstream actual_ss;
+ BytecodeDecoder::Decode(actual_ss, cases[i].bytecode,
+ cases[i].parameter_count);
+
+ // Compare.
+ CHECK_EQ(actual_ss.str(), expected_ss.str());
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
new file mode 100644
index 0000000000..c23c89b433
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
@@ -0,0 +1,531 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-peephole-optimizer.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
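+// Stands in for the stage downstream of the peephole optimizer; it counts
+// writes and keeps the last node written so tests can inspect the output.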
+class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodePeepholeOptimizerTest() : peephole_optimizer_(this) {}
+ ~BytecodePeepholeOptimizerTest() override {}
+
+ void Reset() {
+ last_written_.set_bytecode(Bytecode::kIllegal);
+ write_count_ = 0;
+ }
+
+ void Write(BytecodeNode* node) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ void Flush() {
+ optimizer()->ToBytecodeArray(isolate(), 0, 0,
+ factory()->empty_fixed_array());
+ }
+
+ BytecodePeepholeOptimizer* optimizer() { return &peephole_optimizer_; }
+
+ int write_count() const { return write_count_; }
+ const BytecodeNode& last_written() const { return last_written_; }
+
+ private:
+ BytecodePeepholeOptimizer peephole_optimizer_;
+
+ int write_count_ = 0;
+ BytecodeNode last_written_;
+};
+
+// Sanity tests.
+
+TEST_F(BytecodePeepholeOptimizerTest, FlushOnJump) {
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, FlushOnBind) {
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeLabel target;
+ optimizer()->BindLabel(&target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+// Nop elimination tests.
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideEmptyNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepStatementNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().MakeStatementPosition(3);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ add.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(add, last_written());
+}
+
+// Tests covering BytecodePeepholeOptimizer::UpdateCurrentBytecode().
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepJumpIfToBooleanTrue) {
+ BytecodeNode first(Bytecode::kLdaNull);
+ BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3);
+ BytecodeLabel label;
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->WriteJump(&second, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideJumpIfToBooleanTrue) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3);
+ BytecodeLabel label;
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->WriteJump(&second, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepToBooleanLogicalNot) {
+ BytecodeNode first(Bytecode::kLdaNull);
+ BytecodeNode second(Bytecode::kToBooleanLogicalNot);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideToBooleanLogicalNot) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ BytecodeNode second(Bytecode::kToBooleanLogicalNot);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLogicalNot);
+}
+
+// Tests covering BytecodePeepholeOptimizer::CanElideCurrent().
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRy) {
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(1).ToOperand());
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRx) {
+ BytecodeLabel label;
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ second.source_info().MakeStatementPosition(0);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kNop);
+ CHECK_EQ(last_written().source_info(), second.source_info());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatementStarRy) {
+ BytecodeLabel label;
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ BytecodeNode third(Bytecode::kStar, Register(3).ToOperand());
+ second.source_info().MakeStatementPosition(0);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 1);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), third);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, LdarToName) {
+ BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kToName, Register(0).ToOperand());
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, TypeOfToName) {
+ BytecodeNode first(Bytecode::kTypeOf);
+ BytecodeNode second(Bytecode::kToName, Register(0).ToOperand());
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kStar);
+}
+
+// Tests covering BytecodePeepholeOptimizer::CanElideLast().
+
+TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalse) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ BytecodeNode second(Bytecode::kLdaFalse);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ first.source_info().MakeExpressionPosition(3);
+ BytecodeNode second(Bytecode::kLdaFalse);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), second);
+ CHECK(second.source_info().is_expression());
+ CHECK_EQ(second.source_info().source_position(), 3);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
+ BytecodeNode first(Bytecode::kNop);
+ BytecodeNode second(Bytecode::kStackCheck);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
+ BytecodeNode first(Bytecode::kNop);
+ first.source_info().MakeExpressionPosition(3);
+ BytecodeNode second(Bytecode::kStackCheck);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ second.source_info().MakeExpressionPosition(
+ first.source_info().source_position());
+ CHECK_EQ(last_written(), second);
+}
+
+// Tests covering BytecodePeepholeOptimizer::UpdateLastAndCurrentBytecodes().
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLoadICStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(31).ToOperand()), 32, 33,
+ static_cast<uint32_t>(Register(256).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ operands[2]);
+ BytecodeNode second(Bytecode::kStar, operands[3]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrNamedProperty);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaKeyedPropertyStar) {
+ const uint32_t operands[] = {static_cast<uint32_t>(Register(31).ToOperand()),
+ 9999997,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1]);
+ BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrKeyedProperty);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaGlobalStar) {
+ const uint32_t operands[] = {19191,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaGlobal, operands[0]);
+ BytecodeNode second(Bytecode::kStar, operands[1]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrGlobal);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaContextSlotStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(200000).ToOperand()), 55005500,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaContextSlot, operands[0], operands[1]);
+ BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrContextSlot);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaUndefinedStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(100000).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaUndefined);
+ BytecodeNode second(Bytecode::kStar, operands[0]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrUndefined);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
+ Bytecode operator_replacement_pairs[][2] = {
+ {Bytecode::kAdd, Bytecode::kAddSmi},
+ {Bytecode::kSub, Bytecode::kSubSmi},
+ {Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
+ {Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
+ {Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
+ {Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
+
+ for (auto operator_replacement : operator_replacement_pairs) {
+ uint32_t imm_operand = 17;
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
+ first.source_info().Clone({3, true});
+ uint32_t reg_operand = Register(0).ToOperand();
+ uint32_t idx_operand = 1;
+ BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
+ CHECK_EQ(last_written().operand_count(), 3);
+ CHECK_EQ(last_written().operand(0), imm_operand);
+ CHECK_EQ(last_written().operand(1), reg_operand);
+ CHECK_EQ(last_written().operand(2), idx_operand);
+ CHECK_EQ(last_written().source_info(), first.source_info());
+ Reset();
+ }
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, NotMergingLdaSmiWithBinaryOp) {
+ Bytecode operator_replacement_pairs[][2] = {
+ {Bytecode::kAdd, Bytecode::kAddSmi},
+ {Bytecode::kSub, Bytecode::kSubSmi},
+ {Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
+ {Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
+ {Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
+ {Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
+
+ for (auto operator_replacement : operator_replacement_pairs) {
+ uint32_t imm_operand = 17;
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
+ first.source_info().Clone({3, true});
+ uint32_t reg_operand = Register(0).ToOperand();
+ BytecodeNode second(operator_replacement[0], reg_operand, 1);
+ second.source_info().Clone({4, true});
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(last_written(), second);
+ Reset();
+ }
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaZeroWithBinaryOp) {
+ Bytecode operator_replacement_pairs[][2] = {
+ {Bytecode::kAdd, Bytecode::kAddSmi},
+ {Bytecode::kSub, Bytecode::kSubSmi},
+ {Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
+ {Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
+ {Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
+ {Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
+
+ for (auto operator_replacement : operator_replacement_pairs) {
+ BytecodeNode first(Bytecode::kLdaZero);
+ uint32_t reg_operand = Register(0).ToOperand();
+ uint32_t idx_operand = 1;
+ BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
+ CHECK_EQ(last_written().operand_count(), 3);
+ CHECK_EQ(last_written().operand(0), 0);
+ CHECK_EQ(last_written().operand(1), reg_operand);
+ CHECK_EQ(last_written().operand(2), idx_operand);
+ Reset();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
new file mode 100644
index 0000000000..663b7e54e5
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
@@ -0,0 +1,185 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using BytecodeNodeTest = TestWithIsolateAndZone;
+
+TEST(BytecodeSourceInfo, Operations) {
+ BytecodeSourceInfo x(0, true);
+ CHECK_EQ(x.source_position(), 0);
+ CHECK_EQ(x.is_statement(), true);
+ CHECK_EQ(x.is_valid(), true);
+ x.set_invalid();
+ CHECK_EQ(x.is_statement(), false);
+ CHECK_EQ(x.is_valid(), false);
+
+ x.MakeStatementPosition(1);
+ BytecodeSourceInfo y(1, true);
+ CHECK(x == y);
+ CHECK(!(x != y));
+
+ x.set_invalid();
+ CHECK(!(x == y));
+ CHECK(x != y);
+
+ y.MakeStatementPosition(1);
+ CHECK_EQ(y.source_position(), 1);
+ CHECK_EQ(y.is_statement(), true);
+
+ y.MakeStatementPosition(2);
+ CHECK_EQ(y.source_position(), 2);
+ CHECK_EQ(y.is_statement(), true);
+
+ y.set_invalid();
+ y.MakeExpressionPosition(3);
+ CHECK_EQ(y.source_position(), 3);
+ CHECK_EQ(y.is_statement(), false);
+
+ y.MakeStatementPosition(3);
+ CHECK_EQ(y.source_position(), 3);
+ CHECK_EQ(y.is_statement(), true);
+}
+
+TEST_F(BytecodeNodeTest, Constructor0) {
+ BytecodeNode node;
+ CHECK_EQ(node.bytecode(), Bytecode::kIllegal);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor1) {
+ BytecodeNode node(Bytecode::kLdaZero);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaZero);
+ CHECK_EQ(node.operand_count(), 0);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor2) {
+ uint32_t operands[] = {0x11};
+ BytecodeNode node(Bytecode::kJumpIfTrue, operands[0]);
+ CHECK_EQ(node.bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(node.operand_count(), 1);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor3) {
+ uint32_t operands[] = {0x11};
+ BytecodeNode node(Bytecode::kLdaGlobal, operands[0]);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaGlobal);
+ CHECK_EQ(node.operand_count(), 1);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor4) {
+ uint32_t operands[] = {0x11, 0x22, 0x33};
+ BytecodeNode node(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ operands[2]);
+ CHECK_EQ(node.operand_count(), 3);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaNamedProperty);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK_EQ(node.operand(1), operands[1]);
+ CHECK_EQ(node.operand(2), operands[2]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor5) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ CHECK_EQ(node.operand_count(), 4);
+ CHECK_EQ(node.bytecode(), Bytecode::kForInNext);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK_EQ(node.operand(1), operands[1]);
+ CHECK_EQ(node.operand(2), operands[2]);
+ CHECK_EQ(node.operand(3), operands[3]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Equality) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ CHECK_EQ(node, node);
+ BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
+ operands[2], operands[3]);
+ CHECK_EQ(node, other);
+}
+
+TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ node.source_info().MakeStatementPosition(3);
+ CHECK_EQ(node, node);
+ BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
+ operands[2], operands[3]);
+ other.source_info().MakeStatementPosition(3);
+ CHECK_EQ(node, other);
+}
+
+TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ node.source_info().MakeStatementPosition(3);
+ BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
+ operands[2], operands[3]);
+ CHECK_NE(node, other);
+}
+
+TEST_F(BytecodeNodeTest, Clone) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ BytecodeNode clone;
+ clone.Clone(&node);
+ CHECK_EQ(clone, node);
+}
+
+TEST_F(BytecodeNodeTest, SetBytecode0) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ BytecodeSourceInfo source_info(77, false);
+ node.source_info().Clone(source_info);
+ CHECK_EQ(node.source_info(), source_info);
+
+ BytecodeNode clone;
+ clone.Clone(&node);
+ clone.set_bytecode(Bytecode::kNop);
+ CHECK_EQ(clone.bytecode(), Bytecode::kNop);
+ CHECK_EQ(clone.operand_count(), 0);
+ CHECK_EQ(clone.source_info(), source_info);
+}
+
+TEST_F(BytecodeNodeTest, SetBytecode1) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ BytecodeSourceInfo source_info(77, false);
+ node.source_info().Clone(source_info);
+
+ BytecodeNode clone;
+ clone.Clone(&node);
+ clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
+ CHECK_EQ(clone.bytecode(), Bytecode::kJump);
+ CHECK_EQ(clone.operand_count(), 1);
+ CHECK_EQ(clone.operand(0), 0x01aabbcc);
+ CHECK_EQ(clone.source_info(), source_info);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
new file mode 100644
index 0000000000..ca69026fda
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register-optimizer.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
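+// Collects every node emitted by the register optimizer into a vector so
+// tests can assert on the exact sequence of materialized bytecodes.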
+class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodeRegisterOptimizerTest() {}
+ ~BytecodeRegisterOptimizerTest() override { delete register_allocator_; }
+
+ void Initialize(int number_of_parameters, int number_of_locals) {
+ register_allocator_ =
+ new TemporaryRegisterAllocator(zone(), number_of_locals);
+ register_optimizer_ = new (zone()) BytecodeRegisterOptimizer(
+ zone(), register_allocator_, number_of_parameters, this);
+ }
+
+ void Write(BytecodeNode* node) override { output_.push_back(*node); }
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ output_.push_back(*node);
+ }
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ TemporaryRegisterAllocator* allocator() { return register_allocator_; }
+ BytecodeRegisterOptimizer* optimizer() { return register_optimizer_; }
+
+ Register NewTemporary() {
+ return Register(allocator()->BorrowTemporaryRegister());
+ }
+
+ void KillTemporary(Register reg) {
+ allocator()->ReturnTemporaryRegister(reg.index());
+ }
+
+ size_t write_count() const { return output_.size(); }
+ const BytecodeNode& last_written() const { return output_.back(); }
+ const std::vector<BytecodeNode>* output() { return &output_; }
+
+ private:
+ TemporaryRegisterAllocator* register_allocator_;
+ BytecodeRegisterOptimizer* register_optimizer_;
+
+ std::vector<BytecodeNode> output_;
+};
+
+// Sanity tests.
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ node.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ node.source_info().MakeStatementPosition(3);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
+ Initialize(1, 1);
+ Register temp = NewTemporary();
+ BytecodeNode node(Bytecode::kStar, temp.ToOperand());
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 0);
+ BytecodeLabel label;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kJump);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
+ Initialize(1, 1);
+ Register temp = NewTemporary();
+ BytecodeNode node(Bytecode::kStar, temp.ToOperand());
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 0);
+ BytecodeLabel label;
+ optimizer()->BindLabel(&label);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+}
+
+// Basic Register Optimizations
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 0);
+ Register temp = NewTemporary();
+ BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 0);
+ KillTemporary(temp);
+ CHECK_EQ(write_count(), 0);
+ BytecodeNode node2(Bytecode::kReturn);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kReturn);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 0);
+ Register local = Register(0);
+ BytecodeNode node1(Bytecode::kStar, local.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(0).operand(1), local.ToOperand());
+
+ BytecodeNode node2(Bytecode::kReturn);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 3);
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(1).operand(0), local.ToOperand());
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kReturn);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotMaterializedForInput) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node0(Bytecode::kMov, parameter.ToOperand(), temp0.ToOperand());
+ optimizer()->Write(&node0);
+ BytecodeNode node1(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 0);
+ BytecodeNode node2(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 1);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kCallJSRuntime);
+ CHECK_EQ(output()->at(0).operand(0), 0);
+ CHECK_EQ(output()->at(0).operand(1), parameter.ToOperand());
+ CHECK_EQ(output()->at(0).operand(2), 1);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node0(Bytecode::kLdaSmi, 3);
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
+ optimizer()->Write(&node1);
+ BytecodeNode node2(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node3(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 2);
+ optimizer()->Write(&node3);
+ CHECK_EQ(write_count(), 4);
+
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdaSmi);
+ CHECK_EQ(output()->at(0).operand(0), 3);
+
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
+
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(2).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(2).operand(1), temp1.ToOperand());
+
+ CHECK_EQ(output()->at(3).bytecode(), Bytecode::kCallJSRuntime);
+ CHECK_EQ(output()->at(3).operand(0), 0);
+ CHECK_EQ(output()->at(3).operand(1), temp0.ToOperand());
+ CHECK_EQ(output()->at(3).operand(2), 2);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-utils.h b/deps/v8/test/unittests/interpreter/bytecode-utils.h
new file mode 100644
index 0000000000..fffb7190c8
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-utils.h
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
+#define V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
+
+#include "src/frames.h"
+
+#if V8_TARGET_LITTLE_ENDIAN
+
+#define EXTRACT(x, n) static_cast<uint8_t>((x) >> (8 * n))
+#define U16(i) EXTRACT(i, 0), EXTRACT(i, 1)
+#define U32(i) EXTRACT(i, 0), EXTRACT(i, 1), EXTRACT(i, 2), EXTRACT(i, 3)
+
+#elif V8_TARGET_BIG_ENDIAN
+
+#define EXTRACT(x, n) static_cast<uint8_t>((x) >> (8 * n))
+
+#define U16(i) EXTRACT(i, 1), EXTRACT(i, 0)
+#define U32(i) EXTRACT(i, 3), EXTRACT(i, 2), EXTRACT(i, 1), EXTRACT(i, 0)
+
+#else
+
+#error "Unknown Architecture"
+
+#endif
+
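+// Helpers for spelling out expected bytecode sequences as raw bytes:
+// B() names a bytecode, U8/U16/U32 encode unsigned immediates, and
+// R8/R16/R32 encode register operands via their frame offsets.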
+#define U8(i) static_cast<uint8_t>(i)
+#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
+#define REG_OPERAND(i) \
+ (InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize - (i))
+#define R8(i) static_cast<uint8_t>(REG_OPERAND(i))
+#define R16(i) U16(REG_OPERAND(i))
+#define R32(i) U32(REG_OPERAND(i))
+
+#endif // V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index b3554c3853..0e68e188c7 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "test/unittests/test-utils.h"
@@ -31,15 +32,6 @@ TEST(OperandConversion, Registers) {
Register reg2 = Register::FromOperand(operand2);
CHECK_EQ(i, reg2.index());
}
-
- for (int i = 0; i <= kMaxUInt8; i++) {
- Register reg = Register::FromOperand(i);
- if (i > 0) {
- CHECK(reg.is_parameter());
- } else {
- CHECK(!reg.is_parameter());
- }
- }
}
TEST(OperandConversion, Parameters) {
@@ -85,45 +77,22 @@ TEST(OperandConversion, RegistersParametersNoOverlap) {
}
TEST(OperandScaling, ScalableAndNonScalable) {
- for (OperandScale operand_scale = OperandScale::kSingle;
- operand_scale <= OperandScale::kMaxValid;
- operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+ const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
int scale = static_cast<int>(operand_scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kCallRuntime, operand_scale),
1 + 2 + 2 * scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kCreateObjectLiteral, operand_scale),
- 1 + 2 * scale + 1);
+ 1 + 2 * scale + 1 + 1 * scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kTestIn, operand_scale), 1 + scale);
}
}
-TEST(Bytecodes, HasAnyRegisterOperands) {
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kAdd), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCall), 2);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntime), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeForPair),
- 2);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kDeletePropertyStrict),
- 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kForInPrepare), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kInc), 0);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kJumpIfTrue), 0);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kNew), 2);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kToName), 0);
-}
-
-TEST(Bytecodes, RegisterOperandBitmaps) {
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kAdd), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kCallRuntimeForPair),
- 10);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kStar), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kMov), 3);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kTestIn), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInPrepare), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInDone), 3);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInNext), 7);
-}
-
TEST(Bytecodes, RegisterOperands) {
CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg));
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg));
@@ -177,74 +146,6 @@ TEST(Bytecodes, DebugBreakExistForEachBytecode) {
#undef CHECK_DEBUG_BREAK_SIZE
}
-TEST(Bytecodes, DecodeBytecodeAndOperands) {
- struct BytecodesAndResult {
- const uint8_t bytecode[32];
- const size_t length;
- int parameter_count;
- const char* output;
- };
-
-#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
- const BytecodesAndResult cases[] = {
- {{B(LdaSmi), 0x01}, 2, 0, " LdaSmi [1]"},
- {{B(Wide), B(LdaSmi), 0xe8, 0x03}, 4, 0, " LdaSmi.Wide [1000]"},
- {{B(ExtraWide), B(LdaSmi), 0xa0, 0x86, 0x01, 0x00},
- 6,
- 0,
- "LdaSmi.ExtraWide [100000]"},
- {{B(LdaSmi), 0xff}, 2, 0, " LdaSmi [-1]"},
- {{B(Wide), B(LdaSmi), 0x18, 0xfc}, 4, 0, " LdaSmi.Wide [-1000]"},
- {{B(ExtraWide), B(LdaSmi), 0x60, 0x79, 0xfe, 0xff},
- 6,
- 0,
- "LdaSmi.ExtraWide [-100000]"},
- {{B(Star), 0xfb}, 2, 0, " Star r5"},
- {{B(Wide), B(Star), 0x78, 0xff}, 4, 0, " Star.Wide r136"},
- {{B(Wide), B(Call), 0x7a, 0xff, 0x79, 0xff, 0x02, 0x00, 0xb1, 0x00},
- 10,
- 0,
- "Call.Wide r134, r135, #2, [177]"},
- {{B(Ldar),
- static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
- 2,
- 3,
- " Ldar a1"},
- {{B(Wide), B(CreateObjectLiteral), 0x01, 0x02, 0x03, 0x04, 0xa5},
- 7,
- 0,
- "CreateObjectLiteral.Wide [513], [1027], #165"},
- {{B(ExtraWide), B(JumpIfNull), 0x15, 0xcd, 0x5b, 0x07},
- 6,
- 0,
- "JumpIfNull.ExtraWide [123456789]"},
- };
-#undef B
-
- for (size_t i = 0; i < arraysize(cases); ++i) {
- // Generate reference string by prepending formatted bytes.
- std::stringstream expected_ss;
- std::ios default_format(nullptr);
- default_format.copyfmt(expected_ss);
- // Match format of Bytecodes::Decode() for byte representations.
- expected_ss.fill('0');
- expected_ss.flags(std::ios::right | std::ios::hex);
- for (size_t b = 0; b < cases[i].length; b++) {
- expected_ss << std::setw(2) << static_cast<uint32_t>(cases[i].bytecode[b])
- << ' ';
- }
- expected_ss.copyfmt(default_format);
- expected_ss << cases[i].output;
-
- // Generate decoded byte output.
- std::stringstream actual_ss;
- Bytecodes::Decode(actual_ss, cases[i].bytecode, cases[i].parameter_count);
-
- // Compare.
- CHECK_EQ(actual_ss.str(), expected_ss.str());
- }
-}
-
TEST(Bytecodes, DebugBreakForPrefixBytecodes) {
CHECK_EQ(Bytecode::kDebugBreakWide,
Bytecodes::GetDebugBreak(Bytecode::kWide));
@@ -260,13 +161,42 @@ TEST(Bytecodes, PrefixMappings) {
}
}
-TEST(OperandScale, PrefixesScale) {
- CHECK(Bytecodes::NextOperandScale(OperandScale::kSingle) ==
- OperandScale::kDouble);
- CHECK(Bytecodes::NextOperandScale(OperandScale::kDouble) ==
- OperandScale::kQuadruple);
- CHECK(Bytecodes::NextOperandScale(OperandScale::kQuadruple) ==
- OperandScale::kInvalid);
+TEST(Bytecodes, SizesForSignedOperands) {
+ CHECK(Bytecodes::SizeForSignedOperand(0) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt8) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8 + 1) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt8 - 1) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt16) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt16) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt16 + 1) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt16 - 1) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt) == OperandSize::kQuad);
+}
+
+TEST(Bytecodes, SizesForUnsignedOperands) {
+ // int overloads
+ CHECK(Bytecodes::SizeForUnsignedOperand(0) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt8) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt8 + 1) ==
+ OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt16) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt16 + 1) ==
+ OperandSize::kQuad);
+ // size_t overloads
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(0)) ==
+ OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8)) ==
+ OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8 + 1)) ==
+ OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt16)) ==
+ OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt16 + 1)) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt32)) ==
+ OperandSize::kQuad);
}
TEST(OperandScale, PrefixesRequired) {
@@ -314,7 +244,6 @@ TEST(AccumulatorUse, AccumulatorUseToString) {
names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kReadWrite));
CHECK_EQ(names.size(), 4);
}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
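
A minimal standalone sketch of the operand-size selection that the new SizesForSignedOperands / SizesForUnsignedOperands tests pin down (plain C++ with <cstdint> limits standing in for V8's kMaxInt8/kMaxUInt16-style constants; this is an illustration, not the src/interpreter implementation):

// size_for_operand_sketch.cc -- standalone illustration, not V8 source.
#include <cassert>
#include <cstdint>
#include <cstdio>

enum class OperandSize { kByte = 1, kShort = 2, kQuad = 4 };

// Smallest size whose signed range covers |value|.
OperandSize SizeForSignedOperand(int32_t value) {
  if (value >= INT8_MIN && value <= INT8_MAX) return OperandSize::kByte;
  if (value >= INT16_MIN && value <= INT16_MAX) return OperandSize::kShort;
  return OperandSize::kQuad;
}

// Smallest size whose unsigned range covers |value|.
OperandSize SizeForUnsignedOperand(uint32_t value) {
  if (value <= UINT8_MAX) return OperandSize::kByte;
  if (value <= UINT16_MAX) return OperandSize::kShort;
  return OperandSize::kQuad;
}

int main() {
  assert(SizeForSignedOperand(INT8_MAX) == OperandSize::kByte);
  assert(SizeForSignedOperand(INT8_MAX + 1) == OperandSize::kShort);
  assert(SizeForSignedOperand(INT16_MIN - 1) == OperandSize::kQuad);
  assert(SizeForUnsignedOperand(UINT8_MAX + 1) == OperandSize::kShort);
  assert(SizeForUnsignedOperand(UINT16_MAX + 1) == OperandSize::kQuad);
  std::puts("operand-size thresholds behave as the unit test expects");
  return 0;
}
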
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 71224370cc..38cbb6d534 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -29,7 +29,8 @@ STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilderTest::k8BitCapacity;
TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k16BitCapacity; i++) {
builder.Insert(handle(Smi::FromInt(static_cast<int>(i)), isolate()));
}
@@ -39,24 +40,71 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
}
}
+TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ static const size_t kNumberOfElements = 37;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
+TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ static const size_t kNumberOfElements = 37373;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
+TEST_F(ConstantArrayBuilderTest, ToLargeFixedArrayWithReservations) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ static const size_t kNumberOfElements = 37373;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
+ CanonicalHandleScope canonical(isolate());
for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
- ConstantArrayBuilder builder(isolate(), zone());
+ ConstantArrayBuilder builder(zone(),
+ isolate()->factory()->the_hole_value());
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kByte);
}
for (size_t i = 0; i < 2 * k8BitCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
if (i + reserved < k8BitCapacity) {
CHECK_LE(builder.size(), k8BitCapacity);
CHECK_EQ(builder.size(), i + 1);
- CHECK(builder.At(i)->SameValue(*object));
} else {
CHECK_GE(builder.size(), k8BitCapacity);
CHECK_EQ(builder.size(), i + reserved + 1);
- CHECK(builder.At(i + reserved)->SameValue(*object));
}
}
CHECK_EQ(builder.size(), 2 * k8BitCapacity + reserved);
@@ -67,54 +115,52 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
}
- // Commmit reserved entries with duplicates and check size does not change.
+ // Commit reserved entries with duplicates and check size does not change.
DCHECK_EQ(reserved + 2 * k8BitCapacity, builder.size());
size_t duplicates_in_idx8_space =
std::min(reserved, k8BitCapacity - reserved);
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
builder.CommitReservedEntry(OperandSize::kByte,
- isolate()->factory()->NewNumberFromSize(i));
+ Smi::FromInt(static_cast<int>(i)));
DCHECK_EQ(reserved + 2 * k8BitCapacity, builder.size());
}
- // Check all committed values match expected (holes where
- // duplicates_in_idx8_space allocated).
- for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
- Smi* smi = Smi::FromInt(static_cast<int>(i));
- CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
- }
- for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
- Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
- CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
- }
- for (size_t i = 0; i < reserved; i++) {
- size_t index = k8BitCapacity - reserved + i;
- CHECK(builder.At(index)->IsTheHole());
- }
-
// Now make reservations, and commit them with unique entries.
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kByte);
}
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
- Handle<Object> object =
- isolate()->factory()->NewNumberFromSize(2 * k8BitCapacity + i);
- size_t index = builder.CommitReservedEntry(OperandSize::kByte, object);
+ Smi* value = Smi::FromInt(static_cast<int>(2 * k8BitCapacity + i));
+ size_t index = builder.CommitReservedEntry(OperandSize::kByte, value);
CHECK_EQ(static_cast<int>(index), k8BitCapacity - reserved + i);
- CHECK(builder.At(static_cast<int>(index))->SameValue(*object));
}
- CHECK_EQ(builder.size(), 2 * k8BitCapacity + reserved);
+
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), 2 * k8BitCapacity + reserved);
+
+  // Check all committed values match the expected values.
+ for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
+ Object* value = constant_array->get(static_cast<int>(i));
+ Smi* smi = Smi::FromInt(static_cast<int>(i));
+ CHECK(value->SameValue(smi));
+ }
+ for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
+ Object* value = constant_array->get(static_cast<int>(i));
+ Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
+ CHECK(value->SameValue(smi));
+ }
}
}
TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
+ CanonicalHandleScope canonical(isolate());
for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
- ConstantArrayBuilder builder(isolate(), zone());
+ ConstantArrayBuilder builder(zone(),
+ isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k8BitCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK(builder.At(i)->SameValue(*object));
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + 1);
}
for (size_t i = 0; i < reserved; i++) {
@@ -129,79 +175,59 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.CommitReservedEntry(operand_size, object);
+ builder.CommitReservedEntry(operand_size,
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), k8BitCapacity);
}
for (size_t i = k8BitCapacity; i < k8BitCapacity + reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.CommitReservedEntry(operand_size, object);
+ builder.CommitReservedEntry(operand_size,
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + 1);
}
- }
-}
-
-
-TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
- ConstantArrayBuilder builder(isolate(), zone());
- static const size_t kNumberOfElements = 37;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK(builder.At(i)->SameValue(*object));
- }
- Handle<FixedArray> constant_array = builder.ToFixedArray();
- CHECK_EQ(constant_array->length(), kNumberOfElements);
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
- }
-}
-TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
- ConstantArrayBuilder builder(isolate(), zone());
- static const size_t kNumberOfElements = 37373;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK(builder.At(i)->SameValue(*object));
- }
- Handle<FixedArray> constant_array = builder.ToFixedArray();
- CHECK_EQ(constant_array->length(), kNumberOfElements);
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), k8BitCapacity + reserved);
+ for (size_t i = 0; i < k8BitCapacity + reserved; i++) {
+ Object* value = constant_array->get(static_cast<int>(i));
+ CHECK(value->SameValue(*isolate()->factory()->NewNumberFromSize(i)));
+ }
}
}
TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
CHECK_EQ(builder.size(), 0);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + k8BitCapacity + 1);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
builder.CommitReservedEntry(OperandSize::kByte,
- builder.At(i + k8BitCapacity));
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), 2 * k8BitCapacity);
for (size_t i = 0; i < k8BitCapacity; i++) {
- Handle<Object> original = builder.At(k8BitCapacity + i);
- Handle<Object> duplicate = builder.At(i);
- CHECK(original->SameValue(*duplicate));
+ Object* original = constant_array->get(static_cast<int>(k8BitCapacity + i));
+ Object* duplicate = constant_array->get(static_cast<int>(i));
+ CHECK(original->SameValue(duplicate));
Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
CHECK(original->SameValue(*reference));
}
}
TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
@@ -227,8 +253,9 @@ TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
}
TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
+ CanonicalHandleScope canonical(isolate());
static int kNumberOfHoles = 128;
- ConstantArrayBuilder builder(isolate(), zone());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (int i = 0; i < kNumberOfHoles; ++i) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
@@ -237,7 +264,7 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
}
CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(256)), 256);
- Handle<FixedArray> constant_array = builder.ToFixedArray();
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 257);
for (int i = 128; i < 256; i++) {
CHECK(constant_array->get(i)->SameValue(
@@ -250,7 +277,8 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
}
TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (int i = 0; i < 256; i++) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
@@ -260,16 +288,12 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
for (int i = 65536; i < 131072; ++i) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kQuad);
}
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte,
- isolate()->factory()->NewNumber(1)),
- 0);
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kShort,
- isolate()->factory()->NewNumber(2)),
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte, Smi::FromInt(1)), 0);
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kShort, Smi::FromInt(2)),
256);
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kQuad,
- isolate()->factory()->NewNumber(3)),
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kQuad, Smi::FromInt(3)),
65536);
- Handle<FixedArray> constant_array = builder.ToFixedArray();
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 65537);
int count = 1;
for (int i = 0; i < constant_array->length(); ++i) {
@@ -283,6 +307,41 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
}
}
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithFixedReservations) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ for (size_t i = 0; i < k16BitCapacity; i++) {
+ if ((i % 2) == 0) {
+ CHECK_EQ(i, builder.AllocateEntry());
+ } else {
+ builder.Insert(handle(Smi::FromInt(static_cast<int>(i)), isolate()));
+ }
+ }
+ CHECK_EQ(builder.size(), k16BitCapacity);
+
+ // Check values before reserved entries are inserted.
+ for (size_t i = 0; i < k16BitCapacity; i++) {
+ if ((i % 2) == 0) {
+ // Check reserved values are the hole.
+ Handle<Object> empty = builder.At(i);
+ CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
+ } else {
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ }
+ }
+
+ // Insert reserved entries.
+ for (size_t i = 0; i < k16BitCapacity; i += 2) {
+ builder.InsertAllocatedEntry(
+ i, handle(Smi::FromInt(static_cast<int>(i)), isolate()));
+ }
+
+ // Check values after reserved entries are inserted.
+ for (size_t i = 0; i < k16BitCapacity; i++) {
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
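
The rewritten reservation tests drive the builder through CreateReservedEntry()/CommitReservedEntry() instead of Insert(). As a rough mental model only (not V8's ConstantArrayBuilder, which additionally tracks separate byte/short/quad index spaces), a reservation holds a slot open and a commit either reuses an existing duplicate or appends a new entry:

// constant_pool_sketch.cc -- conceptual reserve/commit sketch, not V8 code.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

enum class OperandSize { kByte, kShort };

class ConstantPoolSketch {
 public:
  static constexpr size_t k8BitCapacity = 256;

  // Reserve space for one future entry; the result tells the caller how wide
  // the operand referring to that entry will have to be.
  OperandSize CreateReservedEntry() {
    ++reserved_;
    return entries_.size() + reserved_ <= k8BitCapacity ? OperandSize::kByte
                                                        : OperandSize::kShort;
  }

  // Commit a reserved entry: duplicates reuse the existing index, so the
  // pool does not grow when the same value is committed twice.
  size_t CommitReservedEntry(OperandSize, int64_t value) {
    --reserved_;
    auto it = index_of_.find(value);
    if (it != index_of_.end()) return it->second;
    entries_.push_back(value);
    index_of_[value] = entries_.size() - 1;
    return entries_.size() - 1;
  }

  size_t size() const { return entries_.size() + reserved_; }

 private:
  std::vector<int64_t> entries_;
  std::unordered_map<int64_t, size_t> index_of_;
  size_t reserved_ = 0;
};

int main() {
  ConstantPoolSketch pool;
  OperandSize s = pool.CreateReservedEntry();
  assert(s == OperandSize::kByte);
  size_t a = pool.CommitReservedEntry(s, 42);
  size_t b = pool.CommitReservedEntry(pool.CreateReservedEntry(), 42);
  assert(a == b);            // duplicate commit reuses the slot
  assert(pool.size() == 1);  // and the pool did not grow
  std::puts("reserve/commit sketch ok");
  return 0;
}
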
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 0106c577bd..ff6f14df21 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -93,10 +93,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset) {
return IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
}
Matcher<Node*>
@@ -104,10 +103,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
int offset) {
Matcher<Node*> load_matcher = IsLoad(
MachineType::Int8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
if (kPointerSize == 8) {
load_matcher = IsChangeInt32ToInt64(load_matcher);
}
@@ -120,11 +118,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint16(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -139,10 +135,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return IsWord32Or(IsWord32Shl(bytes[0], IsInt32Constant(kBitsPerByte)),
@@ -157,11 +152,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
MachineType::Int16(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -176,10 +169,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
load_matcher = IsWord32Or(
@@ -198,11 +190,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint32(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -217,10 +207,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return IsWord32Or(
@@ -239,11 +228,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
MachineType::Int32(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -258,10 +245,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
load_matcher = IsWord32Or(
@@ -313,41 +299,59 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- m.Dispatch();
- Graph* graph = m.graph();
-
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
+ Node* tail_call_node = m.Dispatch();
OperandScale operand_scale = OperandScale::kSingle;
- Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(
- interpreter::Bytecodes::Size(bytecode, operand_scale)));
- Matcher<Node*> target_bytecode_matcher = m.IsLoad(
- MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- next_bytecode_offset_matcher);
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(
+ interpreter::Bytecodes::Size(bytecode, operand_scale)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ next_bytecode_offset_matcher);
if (kPointerSize == 8) {
target_bytecode_matcher = IsChangeUint32ToUint64(target_bytecode_matcher);
}
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
IsWordShl(target_bytecode_matcher, IsIntPtrConstant(kPointerSizeLog2)));
+ if (interpreter::Bytecodes::IsStarLookahead(bytecode, operand_scale)) {
+ Matcher<Node*> after_lookahead_offset =
+ IsIntPtrAdd(next_bytecode_offset_matcher,
+ IsIntPtrConstant(interpreter::Bytecodes::Size(
+ Bytecode::kStar, operand_scale)));
+ next_bytecode_offset_matcher =
+ IsPhi(MachineType::PointerRepresentation(),
+ next_bytecode_offset_matcher, after_lookahead_offset, _);
+ Matcher<Node*> after_lookahead_bytecode =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ after_lookahead_offset);
+ if (kPointerSize == 8) {
+ after_lookahead_bytecode =
+ IsChangeUint32ToUint64(after_lookahead_bytecode);
+ }
+ target_bytecode_matcher =
+ IsPhi(MachineRepresentation::kWord8, target_bytecode_matcher,
+ after_lookahead_bytecode, _);
+ code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ IsWordShl(target_bytecode_matcher,
+ IsIntPtrConstant(kPointerSizeLog2)));
+ }
+
EXPECT_THAT(
tail_call_node,
- IsTailCall(
- _, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- next_bytecode_offset_matcher,
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
+ IsTailCall(_, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulator),
+ next_bytecode_offset_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ _, _));
}
}
@@ -358,15 +362,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
int jump_offsets[] = {-9710, -77, 0, +3, +97109};
TRACED_FOREACH(int, jump_offset, jump_offsets) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ if (!interpreter::Bytecodes::IsJump(bytecode)) return;
+
InterpreterAssemblerForTest m(this, bytecode);
- m.Jump(m.IntPtrConstant(jump_offset));
- Graph* graph = m.graph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
+ Node* tail_call_node = m.Jump(m.IntPtrConstant(jump_offset));
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(jump_offset));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
@@ -374,111 +376,23 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
target_bytecode_matcher =
IsChangeUint32ToUint64(target_bytecode_matcher);
}
- Matcher<Node*> code_target_matcher = m.IsLoad(
- MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWordShl(target_bytecode_matcher,
- IsIntPtrConstant(kPointerSizeLog2)));
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ IsWordShl(target_bytecode_matcher,
+ IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
- IsTailCall(
- _, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(
- InterpreterDispatchDescriptor::kRegisterFileParameter),
- next_bytecode_offset_matcher, _,
- IsParameter(
- InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
+ IsTailCall(_, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulator),
+ next_bytecode_offset_matcher, _,
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ _, _));
}
}
}
-TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
- static const int kJumpIfTrueOffset = 73;
-
- // If debug code is enabled we emit extra code in Jump.
- if (FLAG_debug_code) return;
-
- MachineOperatorBuilder machine(zone());
-
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- Node* lhs = m.IntPtrConstant(0);
- Node* rhs = m.IntPtrConstant(1);
- m.JumpIfWordEqual(lhs, rhs, m.IntPtrConstant(kJumpIfTrueOffset));
- Graph* graph = m.graph();
- Node* end = graph->end();
- EXPECT_EQ(2, end->InputCount());
-
- OperandScale operand_scale = OperandScale::kSingle;
- int jump_offsets[] = {kJumpIfTrueOffset, interpreter::Bytecodes::Size(
- bytecode, operand_scale)};
- for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
- Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(jump_offsets[i]));
- Matcher<Node*> target_bytecode_matcher =
- m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
- if (kPointerSize == 8) {
- target_bytecode_matcher =
- IsChangeUint32ToUint64(target_bytecode_matcher);
- }
- Matcher<Node*> code_target_matcher = m.IsLoad(
- MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWordShl(target_bytecode_matcher,
- IsIntPtrConstant(kPointerSizeLog2)));
- EXPECT_THAT(
- end->InputAt(i),
- IsTailCall(
- _, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(
- InterpreterDispatchDescriptor::kRegisterFileParameter),
- next_bytecode_offset_matcher, _,
- IsParameter(
- InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
- }
-
- // TODO(oth): test control flow paths.
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
- // If debug code is enabled we emit extra code in InterpreterReturn.
- if (FLAG_debug_code) return;
-
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- m.InterpreterReturn();
- Graph* graph = m.graph();
-
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
-
- Handle<HeapObject> exit_trampoline =
- isolate()->builtins()->InterpreterExitTrampoline();
- EXPECT_THAT(
- tail_call_node,
- IsTailCall(
- _, IsHeapConstant(exit_trampoline),
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- _,
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
- }
-}
-
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
static const OperandScale kOperandScales[] = {
OperandScale::kSingle, OperandScale::kDouble, OperandScale::kQuadruple};
@@ -525,6 +439,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
m.IsUnsignedOperand(offset, operand_size));
break;
+ case interpreter::OperandType::kIntrinsicId:
+ EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -543,9 +461,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
InterpreterAssemblerForTest m(this, bytecode);
// Should be incoming accumulator if not set.
- EXPECT_THAT(
- m.GetAccumulator(),
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
+ EXPECT_THAT(m.GetAccumulator(),
+ IsParameter(InterpreterDispatchDescriptor::kAccumulator));
// Should be set by SetAccumulator.
Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
m.SetAccumulator(accumulator_value_1);
@@ -555,24 +472,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);
// Should be passed to next bytecode handler on dispatch.
- m.Dispatch();
- Graph* graph = m.graph();
-
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
+ Node* tail_call_node = m.Dispatch();
EXPECT_THAT(tail_call_node,
- IsTailCall(_, _, accumulator_value_2, _, _, _, _, _, _));
+ IsTailCall(_, _, accumulator_value_2, _, _, _, _));
}
}
-TARGET_TEST_F(InterpreterAssemblerTest, GetSetContext) {
+TARGET_TEST_F(InterpreterAssemblerTest, GetContext) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* context_node = m.Int32Constant(100);
- m.SetContext(context_node);
- EXPECT_THAT(m.GetContext(), context_node);
+ EXPECT_THAT(
+ m.GetContext(),
+ m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
+ IsIntPtrConstant(Register::current_context().ToOperand()
+ << kPointerSizeLog2)));
}
}
@@ -581,11 +495,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
InterpreterAssemblerForTest m(this, bytecode);
Node* reg_index_node = m.IntPtrConstant(44);
Node* reg_location_node = m.RegisterLocation(reg_index_node);
- EXPECT_THAT(
- reg_location_node,
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2))));
+ EXPECT_THAT(reg_location_node,
+ IsIntPtrAdd(IsLoadParentFramePointer(),
+ IsWordShl(reg_index_node,
+ IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -594,12 +507,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
InterpreterAssemblerForTest m(this, bytecode);
Node* reg_index_node = m.IntPtrConstant(44);
Node* load_reg_node = m.LoadRegister(reg_index_node);
- EXPECT_THAT(
- load_reg_node,
- m.IsLoad(
- MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2))));
+ EXPECT_THAT(load_reg_node,
+ m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
+ IsWordShl(reg_index_node,
+ IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -611,12 +522,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
- m.IsStore(
- StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2)),
- store_value));
+ m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ IsLoadParentFramePointer(),
+ IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2)),
+ store_value));
}
}
@@ -624,9 +534,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* value = m.Int32Constant(44);
- EXPECT_THAT(
- m.SmiTag(value),
- IsWordShl(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(m.SmiTag(value),
+ IsIntPtrConstant(static_cast<intptr_t>(44)
+ << (kSmiShiftSize + kSmiTagSize)));
EXPECT_THAT(
m.SmiUntag(value),
IsWordSar(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
@@ -669,7 +579,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
Matcher<Node*> constant_pool_matcher = m.IsLoad(
MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
EXPECT_THAT(
load_constant,
@@ -730,14 +640,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
InterpreterAssemblerForTest m(this, bytecode);
Node* arg1 = m.Int32Constant(2);
Node* arg2 = m.Int32Constant(3);
- Node* context =
- m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* context = m.Int32Constant(4);
Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
- EXPECT_THAT(
- call_runtime,
- IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
+ EXPECT_THAT(call_runtime,
+ IsCall(_, _, arg1, arg2, _, IsInt32Constant(2), context, _, _));
}
}
@@ -751,8 +657,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* function_id = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
- Node* context =
- m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* context = m.Int32Constant(4);
Matcher<Node*> function_table = IsExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
@@ -765,12 +670,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
arg_count, result_size);
- EXPECT_THAT(
- call_runtime,
- IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
- function_entry,
- IsParameter(InterpreterDispatchDescriptor::kContextParameter),
- _, _));
+ EXPECT_THAT(call_runtime,
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count,
+ first_arg, function_entry, context, _, _));
}
}
}
@@ -786,16 +688,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
Node* function = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
- Node* context =
- m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* context = m.Int32Constant(3);
Node* call_js =
m.CallJS(function, context, first_arg, arg_count, tail_call_mode);
- EXPECT_THAT(
- call_js,
- IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
- function,
- IsParameter(InterpreterDispatchDescriptor::kContextParameter),
- _, _));
+ EXPECT_THAT(call_js, IsCall(_, IsHeapConstant(builtin.code()), arg_count,
+ first_arg, function, context, _, _));
}
}
}
@@ -805,21 +702,18 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
InterpreterAssemblerForTest m(this, bytecode);
Node* feedback_vector = m.LoadTypeFeedbackVector();
- Matcher<Node*> load_function_matcher = m.IsLoad(
- MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsIntPtrConstant(
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- Matcher<Node*> load_shared_function_info_matcher =
- m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
- IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag));
-
- EXPECT_THAT(
- feedback_vector,
- m.IsLoad(MachineType::AnyTagged(), load_shared_function_info_matcher,
- IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
- kHeapObjectTag)));
+ Matcher<Node*> load_function_matcher =
+ m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
+ IsIntPtrConstant(Register::function_closure().ToOperand()
+ << kPointerSizeLog2));
+ Matcher<Node*> load_literals_matcher = m.IsLoad(
+ MachineType::AnyTagged(), load_function_matcher,
+ IsIntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag));
+
+ EXPECT_THAT(feedback_vector,
+ m.IsLoad(MachineType::AnyTagged(), load_literals_matcher,
+ IsIntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
+ kHeapObjectTag)));
}
}
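
The operand matchers above spell out how a 16-bit operand is reassembled from single-byte loads when the target lacks unaligned access: start at the most significant byte (kMsbOffset), step towards the least significant one (kStep), then shift and OR. A standalone sketch of that reconstruction, with the endianness constants assumed locally rather than taken from V8 headers:

// operand_decode_sketch.cc -- byte-wise operand reconstruction; the constants
// mirror the matcher structure above, not actual V8 headers.
#include <cassert>
#include <cstdint>
#include <cstdio>

// On a little-endian bytecode stream the most significant byte sits at the
// highest offset, so we start there and step backwards.
uint32_t ReadUnalignedUint16(const uint8_t* bytecode, int offset,
                             bool little_endian) {
  const int step = little_endian ? -1 : 1;
  const int msb_offset = little_endian ? 1 : 0;
  uint32_t msb = bytecode[offset + msb_offset];
  uint32_t lsb = bytecode[offset + msb_offset + step];
  // Matches IsWord32Or(IsWord32Shl(bytes[0], kBitsPerByte), bytes[1]).
  return (msb << 8) | lsb;
}

int main() {
  const uint8_t stream[] = {0x00, 0x34, 0x12};  // operand 0x1234 at offset 1 (LE)
  assert(ReadUnalignedUint16(stream, 1, /*little_endian=*/true) == 0x1234);
  const uint8_t be_stream[] = {0x00, 0x12, 0x34};  // same operand, big-endian
  assert(ReadUnalignedUint16(be_stream, 1, /*little_endian=*/false) == 0x1234);
  std::puts("byte-wise operand decode matches expectations");
  return 0;
}
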
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 1ebdc77c18..e3e525273a 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -52,8 +52,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsUnsignedOperand(int offset,
OperandSize operand_size);
- using InterpreterAssembler::graph;
-
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
index 175b311666..f0b41e78dd 100644
--- a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -44,5 +44,21 @@ TEST(WorkerThreadTest, Basic) {
queue.Terminate();
}
+TEST(WorkerThreadTest, PostSingleTask) {
+ TaskQueue queue;
+ WorkerThread thread1(&queue);
+ WorkerThread thread2(&queue);
+
+ InSequence s;
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ queue.Append(task);
+
+ // The next call should not time out.
+ queue.BlockUntilQueueEmptyForTesting();
+ queue.Terminate();
+}
+
} // namespace platform
} // namespace v8
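
PostSingleTask appends one task and relies on BlockUntilQueueEmptyForTesting() so the mock expectations are only evaluated once a worker has drained the queue. A minimal standard-library sketch of that queue/worker shape (illustrative names and structure only, not the v8::platform classes):

// task_queue_sketch.cc -- minimal queue/worker sketch, not v8::platform code.
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class TaskQueueSketch {
 public:
  void Append(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push(std::move(task));
    not_empty_.notify_one();
  }

  // Returns an empty function once Terminate() has been called and the
  // queue has been drained.
  std::function<void()> GetNext() {
    std::unique_lock<std::mutex> lock(mutex_);
    not_empty_.wait(lock, [this] { return terminated_ || !tasks_.empty(); });
    if (tasks_.empty()) return nullptr;
    auto task = std::move(tasks_.front());
    tasks_.pop();
    if (tasks_.empty()) empty_.notify_all();
    return task;
  }

  // The unit test uses an equivalent call to avoid a timing race: assertions
  // only run after a worker has taken every queued task.
  void BlockUntilQueueEmpty() {
    std::unique_lock<std::mutex> lock(mutex_);
    empty_.wait(lock, [this] { return tasks_.empty(); });
  }

  void Terminate() {
    std::lock_guard<std::mutex> lock(mutex_);
    terminated_ = true;
    not_empty_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable not_empty_, empty_;
  std::queue<std::function<void()>> tasks_;
  bool terminated_ = false;
};

int main() {
  TaskQueueSketch queue;
  std::thread worker([&] {
    while (auto task = queue.GetNext()) task();
  });
  queue.Append([] { std::puts("task ran"); });
  queue.BlockUntilQueueEmpty();
  queue.Terminate();
  worker.join();
  return 0;
}
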
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/register-configuration-unittest.cc
new file mode 100644
index 0000000000..33453ce9bb
--- /dev/null
+++ b/deps/v8/test/unittests/register-configuration-unittest.cc
@@ -0,0 +1,166 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/register-configuration.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+const MachineRepresentation kFloat32 = MachineRepresentation::kFloat32;
+const MachineRepresentation kFloat64 = MachineRepresentation::kFloat64;
+const MachineRepresentation kSimd128 = MachineRepresentation::kSimd128;
+
+class RegisterConfigurationUnitTest : public ::testing::Test {
+ public:
+ RegisterConfigurationUnitTest() {}
+ virtual ~RegisterConfigurationUnitTest() {}
+
+ private:
+};
+
+TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
+ const int kNumGeneralRegs = 3;
+ const int kNumDoubleRegs = 4;
+ const int kNumAllocatableGeneralRegs = 2;
+ const int kNumAllocatableDoubleRegs = 2;
+ int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
+ int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
+
+ RegisterConfiguration test(
+ kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
+ double_codes, RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr,
+ nullptr);
+
+ EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
+ EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_general_registers(),
+ kNumAllocatableGeneralRegs);
+ EXPECT_EQ(test.num_allocatable_double_registers(), kNumAllocatableDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_float_registers(), kNumAllocatableDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_simd128_registers(),
+ kNumAllocatableDoubleRegs);
+
+ EXPECT_EQ(test.allocatable_general_codes_mask(),
+ (1 << general_codes[0]) | (1 << general_codes[1]));
+ EXPECT_EQ(test.GetAllocatableGeneralCode(0), general_codes[0]);
+ EXPECT_EQ(test.GetAllocatableGeneralCode(1), general_codes[1]);
+ EXPECT_EQ(test.allocatable_double_codes_mask(),
+ (1 << double_codes[0]) | (1 << double_codes[1]));
+ EXPECT_EQ(test.GetAllocatableFloatCode(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableDoubleCode(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableSimd128Code(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableFloatCode(1), double_codes[1]);
+ EXPECT_EQ(test.GetAllocatableDoubleCode(1), double_codes[1]);
+ EXPECT_EQ(test.GetAllocatableSimd128Code(1), double_codes[1]);
+}
+
+TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
+ const int kNumGeneralRegs = 3;
+ const int kNumDoubleRegs = 4;
+ const int kNumAllocatableGeneralRegs = 2;
+ const int kNumAllocatableDoubleRegs = 3;
+ int general_codes[] = {1, 2};
+ int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
+
+ RegisterConfiguration test(
+ kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
+ double_codes, RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr,
+ nullptr);
+
+ // There are 3 allocatable double regs, but only 2 can alias float regs.
+ EXPECT_EQ(test.num_allocatable_float_registers(), 4);
+
+ // Test that float registers combine in pairs to form double registers.
+ EXPECT_EQ(test.GetAllocatableFloatCode(0), double_codes[0] * 2);
+ EXPECT_EQ(test.GetAllocatableFloatCode(1), double_codes[0] * 2 + 1);
+ EXPECT_EQ(test.GetAllocatableFloatCode(2), double_codes[1] * 2);
+ EXPECT_EQ(test.GetAllocatableFloatCode(3), double_codes[1] * 2 + 1);
+
+ // There are 3 allocatable double regs, but only 2 pair to form 1 SIMD reg.
+ EXPECT_EQ(test.num_allocatable_simd128_registers(), 1);
+
+ // Test that even-odd pairs of double regs combine to form a SIMD reg.
+ EXPECT_EQ(test.GetAllocatableSimd128Code(0), double_codes[0] / 2);
+
+ // Registers alias themselves.
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kSimd128, 0));
+ // Registers don't alias other registers of the same size.
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat32, 0));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 1, kFloat64, 0));
+ EXPECT_FALSE(test.AreAliases(kSimd128, 1, kSimd128, 0));
+ // Float registers combine in pairs to alias a double with index / 2, and
+ // in 4's to alias a simd128 with index / 4.
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 1, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 1, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 2, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 3, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 2));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 3));
+
+ EXPECT_FALSE(test.AreAliases(kFloat32, 0, kFloat64, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat64, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 0, kSimd128, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kSimd128, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 0, kSimd128, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 1, kSimd128, 1));
+
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 2));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 3));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 4));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 5));
+
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat64, 1));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 1, kFloat64, 2));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 1, kFloat64, 3));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 2, kFloat64, 4));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 2, kFloat64, 5));
+
+ int alias_base_index = -1;
+ EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat32, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat32, 1, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 2, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 3, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat64, 0, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 2);
+
+ // Non-allocatable codes still alias.
+ EXPECT_EQ(test.GetAliases(kFloat64, 2, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 4);
+ // High numbered double and simd regs don't alias nonexistent float registers.
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2,
+ kFloat32, &alias_base_index),
+ 0);
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2 + 1,
+ kFloat32, &alias_base_index),
+ 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters,
+ kFloat32, &alias_base_index),
+ 0);
+}
+
+} // namespace internal
+} // namespace v8
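
The CombineAliasing expectations boil down to index arithmetic: float register i aliases double i/2 and simd128 i/4, while a double covers floats 2i and 2i+1 and a simd128 covers four floats. A standalone sketch of that arithmetic (the log2-width encoding is an assumption for illustration, not src/register-configuration.cc):

// fp_alias_sketch.cc -- index arithmetic behind the aliasing expectations;
// a standalone sketch, not V8's RegisterConfiguration.
#include <cassert>
#include <cstdio>

enum Rep { kFloat32 = 0, kFloat64 = 1, kSimd128 = 2 };  // log2 of width in floats

// Do (rep, index) and (other_rep, other_index) overlap?
bool AreAliases(Rep rep, int index, Rep other_rep, int other_index) {
  // Map both registers onto the float32 index range they cover.
  int start = index << rep;
  int end = start + (1 << rep);
  int other_start = other_index << other_rep;
  int other_end = other_start + (1 << other_rep);
  return start < other_end && other_start < end;
}

// How many other_rep registers overlap (rep, index), and where do they start?
int GetAliases(Rep rep, int index, Rep other_rep, int* base_index) {
  if (rep >= other_rep) {  // a wider register covers several narrower ones
    *base_index = index << (rep - other_rep);
    return 1 << (rep - other_rep);
  }
  *base_index = index >> (other_rep - rep);  // a narrower one lies inside one wider
  return 1;
}

int main() {
  assert(AreAliases(kFloat32, 3, kFloat64, 1));   // float 3 lives in double 1
  assert(!AreAliases(kFloat32, 3, kSimd128, 1));  // but not in simd128 1
  int base = -1;
  assert(GetAliases(kFloat64, 1, kFloat32, &base) == 2 && base == 2);
  assert(GetAliases(kFloat32, 3, kFloat64, &base) == 1 && base == 1);
  std::puts("aliasing arithmetic matches the unit-test expectations");
  return 0;
}
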
diff --git a/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc b/deps/v8/test/unittests/source-position-table-unittest.cc
index d62302a2cd..01d9675061 100644
--- a/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/source-position-table-unittest.cc
@@ -4,7 +4,8 @@
#include "src/v8.h"
-#include "src/interpreter/source-position-table.h"
+#include "src/objects.h"
+#include "src/source-position-table.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -23,60 +24,67 @@ static int offsets[] = {0, 1, 2, 3, 4, 30, 31, 32,
129, 250, 1000, 9999, 12000, 31415926};
TEST_F(SourcePositionTableTest, EncodeStatement) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddStatementPosition(offsets[i], offsets[i]);
+ builder.AddPosition(offsets[i], offsets[i], true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
// (Also below.)
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddStatementPosition(offsets[i], offsets[i]);
- builder.AddStatementPosition(offsets[i], offsets[i] + 1);
+ builder.AddPosition(offsets[i], offsets[i], true);
+ builder.AddPosition(offsets[i], offsets[i] + 1, true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
// (Also below.)
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
TEST_F(SourcePositionTableTest, EncodeExpression) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddExpressionPosition(offsets[i], offsets[i]);
+ builder.AddPosition(offsets[i], offsets[i], false);
}
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
TEST_F(SourcePositionTableTest, EncodeAscending) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
- int accumulator = 0;
+ int code_offset = 0;
+ int source_position = 0;
for (int i = 0; i < arraysize(offsets); i++) {
- accumulator += offsets[i];
+ code_offset += offsets[i];
+ source_position += offsets[i];
if (i % 2) {
- builder.AddStatementPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, true);
} else {
- builder.AddExpressionPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, false);
}
}
- // Also test negative offsets:
+ // Also test negative offsets for source positions:
for (int i = 0; i < arraysize(offsets); i++) {
- accumulator -= offsets[i];
+ code_offset += offsets[i];
+ source_position -= offsets[i];
if (i % 2) {
- builder.AddStatementPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, true);
} else {
- builder.AddExpressionPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, false);
}
}
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
} // namespace interpreter
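
The rewritten tests feed AddPosition(code_offset, source_position, is_statement) triples in which code offsets only ever grow while source positions may move backwards. A small sketch of just that invariant (a plain recorder for illustration, not V8's table encoder):

// source_position_sketch.cc -- records the (code offset, source position,
// is_statement) triples the updated tests produce; not V8's encoder.
#include <cassert>
#include <cstdio>
#include <vector>

struct PositionEntry {
  int code_offset;      // must not decrease between entries
  int source_position;  // may move backwards (e.g. loop back-edges)
  bool is_statement;
};

class PositionRecorder {
 public:
  void AddPosition(int code_offset, int source_position, bool is_statement) {
    // A table builder may assume code offsets arrive in ascending order,
    // which is why EncodeAscending now only ever increases code_offset.
    assert(entries_.empty() || code_offset >= entries_.back().code_offset);
    entries_.push_back({code_offset, source_position, is_statement});
  }
  const std::vector<PositionEntry>& entries() const { return entries_; }

 private:
  std::vector<PositionEntry> entries_;
};

int main() {
  PositionRecorder recorder;
  recorder.AddPosition(0, 10, true);
  recorder.AddPosition(4, 25, false);
  recorder.AddPosition(8, 7, true);  // source position goes backwards: fine
  assert(recorder.entries().size() == 3);
  std::puts("ascending code offsets, free-form source positions");
  return 0;
}
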
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 7d04215143..6ac71d208e 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -13,23 +13,11 @@
namespace v8 {
-class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
-
// static
-ArrayBufferAllocator* TestWithIsolate::array_buffer_allocator_ = NULL;
+v8::ArrayBuffer::Allocator* TestWithIsolate::array_buffer_allocator_ = nullptr;
// static
-Isolate* TestWithIsolate::isolate_ = NULL;
-
+Isolate* TestWithIsolate::isolate_ = nullptr;
TestWithIsolate::TestWithIsolate()
: isolate_scope_(isolate()), handle_scope_(isolate()) {}
@@ -43,7 +31,7 @@ void TestWithIsolate::SetUpTestCase() {
Test::SetUpTestCase();
EXPECT_EQ(NULL, isolate_);
v8::Isolate::CreateParams create_params;
- array_buffer_allocator_ = new ArrayBufferAllocator;
+ array_buffer_allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
create_params.array_buffer_allocator = array_buffer_allocator_;
isolate_ = v8::Isolate::New(create_params);
EXPECT_TRUE(isolate_ != NULL);
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 1342510b61..c5788e2478 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -27,7 +27,7 @@ class TestWithIsolate : public virtual ::testing::Test {
static void TearDownTestCase();
private:
- static ArrayBufferAllocator* array_buffer_allocator_;
+ static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
static Isolate* isolate_;
Isolate::Scope isolate_scope_;
HandleScope handle_scope_;
@@ -43,6 +43,10 @@ class TestWithContext : public virtual TestWithIsolate {
const Local<Context>& context() const { return context_; }
+ v8::internal::Isolate* i_isolate() const {
+ return reinterpret_cast<v8::internal::Isolate*>(isolate());
+ }
+
private:
Local<Context> context_;
Context::Scope context_scope_;
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 003281b020..0ea8b9a43d 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -2,11 +2,154 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# The sources are kept automatically in sync with BUILD.gn.
+
{
'variables': {
'v8_code': 1,
+ 'unittests_sources': [ ### gcmole(all) ###
+ 'base/atomic-utils-unittest.cc',
+ 'base/bits-unittest.cc',
+ 'base/cpu-unittest.cc',
+ 'base/division-by-constant-unittest.cc',
+ 'base/flags-unittest.cc',
+ 'base/functional-unittest.cc',
+ 'base/ieee754-unittest.cc',
+ 'base/logging-unittest.cc',
+ 'base/iterator-unittest.cc',
+ 'base/platform/condition-variable-unittest.cc',
+ 'base/platform/mutex-unittest.cc',
+ 'base/platform/platform-unittest.cc',
+ 'base/platform/semaphore-unittest.cc',
+ 'base/platform/time-unittest.cc',
+ 'base/sys-info-unittest.cc',
+ 'base/utils/random-number-generator-unittest.cc',
+ 'cancelable-tasks-unittest.cc',
+ 'char-predicates-unittest.cc',
+ 'compiler/branch-elimination-unittest.cc',
+ 'compiler/checkpoint-elimination-unittest.cc',
+ 'compiler/common-operator-reducer-unittest.cc',
+ 'compiler/common-operator-unittest.cc',
+ 'compiler/compiler-test-utils.h',
+ 'compiler/control-equivalence-unittest.cc',
+ 'compiler/control-flow-optimizer-unittest.cc',
+ 'compiler/dead-code-elimination-unittest.cc',
+ 'compiler/diamond-unittest.cc',
+ 'compiler/effect-control-linearizer-unittest.cc',
+ 'compiler/escape-analysis-unittest.cc',
+ 'compiler/graph-reducer-unittest.cc',
+ 'compiler/graph-reducer-unittest.h',
+ 'compiler/graph-trimmer-unittest.cc',
+ 'compiler/graph-unittest.cc',
+ 'compiler/graph-unittest.h',
+ 'compiler/instruction-selector-unittest.cc',
+ 'compiler/instruction-selector-unittest.h',
+ 'compiler/instruction-sequence-unittest.cc',
+ 'compiler/instruction-sequence-unittest.h',
+ 'compiler/int64-lowering-unittest.cc',
+ 'compiler/js-builtin-reducer-unittest.cc',
+ 'compiler/js-create-lowering-unittest.cc',
+ 'compiler/js-intrinsic-lowering-unittest.cc',
+ 'compiler/js-operator-unittest.cc',
+ 'compiler/js-typed-lowering-unittest.cc',
+ 'compiler/linkage-tail-call-unittest.cc',
+ 'compiler/liveness-analyzer-unittest.cc',
+ 'compiler/live-range-unittest.cc',
+ 'compiler/load-elimination-unittest.cc',
+ 'compiler/loop-peeling-unittest.cc',
+ 'compiler/machine-operator-reducer-unittest.cc',
+ 'compiler/machine-operator-unittest.cc',
+ 'compiler/move-optimizer-unittest.cc',
+ 'compiler/node-cache-unittest.cc',
+ 'compiler/node-matchers-unittest.cc',
+ 'compiler/node-properties-unittest.cc',
+ 'compiler/node-test-utils.cc',
+ 'compiler/node-test-utils.h',
+ 'compiler/node-unittest.cc',
+ 'compiler/opcodes-unittest.cc',
+ 'compiler/register-allocator-unittest.cc',
+ 'compiler/schedule-unittest.cc',
+ 'compiler/scheduler-unittest.cc',
+ 'compiler/scheduler-rpo-unittest.cc',
+ 'compiler/simplified-operator-reducer-unittest.cc',
+ 'compiler/simplified-operator-unittest.cc',
+ 'compiler/state-values-utils-unittest.cc',
+ 'compiler/tail-call-optimization-unittest.cc',
+ 'compiler/typer-unittest.cc',
+ 'compiler/value-numbering-reducer-unittest.cc',
+ 'compiler/zone-pool-unittest.cc',
+ 'compiler-dispatcher/compiler-dispatcher-job-unittest.cc',
+ 'counters-unittest.cc',
+ 'eh-frame-iterator-unittest.cc',
+ 'eh-frame-writer-unittest.cc',
+ 'interpreter/bytecodes-unittest.cc',
+ 'interpreter/bytecode-array-builder-unittest.cc',
+ 'interpreter/bytecode-array-iterator-unittest.cc',
+ 'interpreter/bytecode-array-writer-unittest.cc',
+ 'interpreter/bytecode-dead-code-optimizer-unittest.cc',
+ 'interpreter/bytecode-decoder-unittest.cc',
+ 'interpreter/bytecode-peephole-optimizer-unittest.cc',
+ 'interpreter/bytecode-pipeline-unittest.cc',
+ 'interpreter/bytecode-register-allocator-unittest.cc',
+ 'interpreter/bytecode-register-optimizer-unittest.cc',
+ 'interpreter/constant-array-builder-unittest.cc',
+ 'interpreter/interpreter-assembler-unittest.cc',
+ 'interpreter/interpreter-assembler-unittest.h',
+ 'libplatform/default-platform-unittest.cc',
+ 'libplatform/task-queue-unittest.cc',
+ 'libplatform/worker-thread-unittest.cc',
+ 'heap/bitmap-unittest.cc',
+ 'heap/gc-idle-time-handler-unittest.cc',
+ 'heap/gc-tracer-unittest.cc',
+ 'heap/marking-unittest.cc',
+ 'heap/memory-reducer-unittest.cc',
+ 'heap/heap-unittest.cc',
+ 'heap/scavenge-job-unittest.cc',
+ 'heap/slot-set-unittest.cc',
+ 'locked-queue-unittest.cc',
+ 'register-configuration-unittest.cc',
+ 'run-all-unittests.cc',
+ 'source-position-table-unittest.cc',
+ 'test-utils.h',
+ 'test-utils.cc',
+ 'value-serializer-unittest.cc',
+ 'wasm/asm-types-unittest.cc',
+ 'wasm/ast-decoder-unittest.cc',
+ 'wasm/control-transfer-unittest.cc',
+ 'wasm/decoder-unittest.cc',
+ 'wasm/encoder-unittest.cc',
+ 'wasm/leb-helper-unittest.cc',
+ 'wasm/loop-assignment-analysis-unittest.cc',
+ 'wasm/module-decoder-unittest.cc',
+ 'wasm/switch-logic-unittest.cc',
+ 'wasm/wasm-macro-gen-unittest.cc',
+ ],
+ 'unittests_sources_arm': [ ### gcmole(arch:arm) ###
+ 'compiler/arm/instruction-selector-arm-unittest.cc',
+ ],
+ 'unittests_sources_arm64': [ ### gcmole(arch:arm64) ###
+ 'compiler/arm64/instruction-selector-arm64-unittest.cc',
+ ],
+ 'unittests_sources_ia32': [ ### gcmole(arch:ia32) ###
+ 'compiler/ia32/instruction-selector-ia32-unittest.cc',
+ ],
+ 'unittests_sources_mips': [ ### gcmole(arch:mips) ###
+ 'compiler/mips/instruction-selector-mips-unittest.cc',
+ ],
+ 'unittests_sources_mips64': [ ### gcmole(arch:mips64) ###
+ 'compiler/mips64/instruction-selector-mips64-unittest.cc',
+ ],
+ 'unittests_sources_x64': [ ### gcmole(arch:x64) ###
+ 'compiler/x64/instruction-selector-x64-unittest.cc',
+ ],
+ 'unittests_sources_ppc': [ ### gcmole(arch:ppc) ###
+ 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ ],
+ 'unittests_sources_s390': [ ### gcmole(arch:s390) ###
+ 'compiler/s390/instruction-selector-s390-unittest.cc',
+ ],
},
- 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'targets': [
{
'target_name': 'unittests',
@@ -17,150 +160,63 @@
'dependencies': [
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
- '../../tools/gyp/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'../..',
],
- 'sources': [ ### gcmole(all) ###
- 'atomic-utils-unittest.cc',
- 'base/bits-unittest.cc',
- 'base/cpu-unittest.cc',
- 'base/division-by-constant-unittest.cc',
- 'base/flags-unittest.cc',
- 'base/functional-unittest.cc',
- 'base/logging-unittest.cc',
- 'base/iterator-unittest.cc',
- 'base/platform/condition-variable-unittest.cc',
- 'base/platform/mutex-unittest.cc',
- 'base/platform/platform-unittest.cc',
- 'base/platform/semaphore-unittest.cc',
- 'base/platform/time-unittest.cc',
- 'base/sys-info-unittest.cc',
- 'base/utils/random-number-generator-unittest.cc',
- 'cancelable-tasks-unittest.cc',
- 'char-predicates-unittest.cc',
- 'compiler/branch-elimination-unittest.cc',
- 'compiler/change-lowering-unittest.cc',
- 'compiler/coalesced-live-ranges-unittest.cc',
- 'compiler/common-operator-reducer-unittest.cc',
- 'compiler/common-operator-unittest.cc',
- 'compiler/compiler-test-utils.h',
- 'compiler/control-equivalence-unittest.cc',
- 'compiler/control-flow-optimizer-unittest.cc',
- 'compiler/dead-code-elimination-unittest.cc',
- 'compiler/diamond-unittest.cc',
- 'compiler/escape-analysis-unittest.cc',
- 'compiler/graph-reducer-unittest.cc',
- 'compiler/graph-reducer-unittest.h',
- 'compiler/graph-trimmer-unittest.cc',
- 'compiler/graph-unittest.cc',
- 'compiler/graph-unittest.h',
- 'compiler/instruction-selector-unittest.cc',
- 'compiler/instruction-selector-unittest.h',
- 'compiler/instruction-sequence-unittest.cc',
- 'compiler/instruction-sequence-unittest.h',
- 'compiler/int64-lowering-unittest.cc',
- 'compiler/js-builtin-reducer-unittest.cc',
- 'compiler/js-create-lowering-unittest.cc',
- 'compiler/js-intrinsic-lowering-unittest.cc',
- 'compiler/js-operator-unittest.cc',
- 'compiler/js-typed-lowering-unittest.cc',
- 'compiler/linkage-tail-call-unittest.cc',
- 'compiler/liveness-analyzer-unittest.cc',
- 'compiler/live-range-unittest.cc',
- 'compiler/load-elimination-unittest.cc',
- 'compiler/loop-peeling-unittest.cc',
- 'compiler/machine-operator-reducer-unittest.cc',
- 'compiler/machine-operator-unittest.cc',
- 'compiler/move-optimizer-unittest.cc',
- 'compiler/node-cache-unittest.cc',
- 'compiler/node-matchers-unittest.cc',
- 'compiler/node-properties-unittest.cc',
- 'compiler/node-test-utils.cc',
- 'compiler/node-test-utils.h',
- 'compiler/node-unittest.cc',
- 'compiler/opcodes-unittest.cc',
- 'compiler/register-allocator-unittest.cc',
- 'compiler/schedule-unittest.cc',
- 'compiler/scheduler-unittest.cc',
- 'compiler/scheduler-rpo-unittest.cc',
- 'compiler/simplified-operator-reducer-unittest.cc',
- 'compiler/simplified-operator-unittest.cc',
- 'compiler/state-values-utils-unittest.cc',
- 'compiler/tail-call-optimization-unittest.cc',
- 'compiler/typer-unittest.cc',
- 'compiler/value-numbering-reducer-unittest.cc',
- 'compiler/zone-pool-unittest.cc',
- 'counters-unittest.cc',
- 'interpreter/bytecodes-unittest.cc',
- 'interpreter/bytecode-array-builder-unittest.cc',
- 'interpreter/bytecode-array-iterator-unittest.cc',
- 'interpreter/bytecode-register-allocator-unittest.cc',
- 'interpreter/constant-array-builder-unittest.cc',
- 'interpreter/interpreter-assembler-unittest.cc',
- 'interpreter/interpreter-assembler-unittest.h',
- 'interpreter/source-position-table-unittest.cc',
- 'libplatform/default-platform-unittest.cc',
- 'libplatform/task-queue-unittest.cc',
- 'libplatform/worker-thread-unittest.cc',
- 'heap/bitmap-unittest.cc',
- 'heap/gc-idle-time-handler-unittest.cc',
- 'heap/gc-tracer-unittest.cc',
- 'heap/memory-reducer-unittest.cc',
- 'heap/heap-unittest.cc',
- 'heap/scavenge-job-unittest.cc',
- 'heap/slot-set-unittest.cc',
- 'locked-queue-unittest.cc',
- 'run-all-unittests.cc',
- 'test-utils.h',
- 'test-utils.cc',
- 'wasm/ast-decoder-unittest.cc',
- 'wasm/decoder-unittest.cc',
- 'wasm/encoder-unittest.cc',
- 'wasm/loop-assignment-analysis-unittest.cc',
- 'wasm/module-decoder-unittest.cc',
- 'wasm/wasm-macro-gen-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources)',
],
'conditions': [
['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'compiler/arm/instruction-selector-arm-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_arm)',
],
}],
['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'compiler/arm64/instruction-selector-arm64-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_arm64)',
],
}],
['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'compiler/ia32/instruction-selector-ia32-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_ia32)',
+ ],
+ }],
+ ['v8_target_arch=="mips"', {
+ 'sources': [
+ '<@(unittests_sources_mips)',
],
}],
['v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'compiler/mips/instruction-selector-mips-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_mips)',
+ ],
+ }],
+ ['v8_target_arch=="mips64"', {
+ 'sources': [
+ '<@(unittests_sources_mips64)',
],
}],
['v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- 'compiler/mips64/instruction-selector-mips64-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_mips64)',
],
}],
['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'compiler/x64/instruction-selector-x64-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_x64)',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_ppc)',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'compiler/s390/instruction-selector-s390-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_s390)',
],
}],
['OS=="aix"', {
@@ -169,9 +225,9 @@
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
}, {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ 'dependencies': ['../../src/v8.gyp:v8'],
}],
['os_posix == 1', {
# TODO(svenpanne): This is a temporary work-around to fix the warnings
@@ -198,7 +254,7 @@
'unittests',
],
'includes': [
- '../../build/isolate.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'unittests.isolate',
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 40b5754f2a..ee135ba5e8 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -11,6 +11,12 @@
'WasmDecoderTest.AllLoadMemCombinations': [SKIP],
'AstDecoderTest.AllLoadMemCombinations': [SKIP],
'AstDecoderTest.AllStoreMemCombinations': [SKIP],
- 'Bytecodes.DecodeBytecodeAndOperands': [SKIP],
}], # 'byteorder == big'
+['arch == x87', {
+ 'Ieee754.Expm1': [SKIP],
+ 'Ieee754.Cos': [SKIP],
+ 'Ieee754.Tan': [SKIP],
+ 'Ieee754.Acosh': [SKIP],
+ 'Ieee754.Asinh': [SKIP],
+}], # 'arch == x87'
]
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
new file mode 100644
index 0000000000..f4ed15b644
--- /dev/null
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -0,0 +1,1368 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/value-serializer.h"
+
+#include <algorithm>
+#include <string>
+
+#include "include/v8.h"
+#include "src/api.h"
+#include "src/base/build_config.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace {
+
+class ValueSerializerTest : public TestWithIsolate {
+ protected:
+ ValueSerializerTest()
+ : serialization_context_(Context::New(isolate())),
+ deserialization_context_(Context::New(isolate())) {}
+
+ const Local<Context>& serialization_context() {
+ return serialization_context_;
+ }
+ const Local<Context>& deserialization_context() {
+ return deserialization_context_;
+ }
+
+ template <typename InputFunctor, typename OutputFunctor>
+ void RoundTripTest(const InputFunctor& input_functor,
+ const OutputFunctor& output_functor) {
+ EncodeTest(input_functor,
+ [this, &output_functor](const std::vector<uint8_t>& data) {
+ DecodeTest(data, output_functor);
+ });
+ }
+
+ // Variant for the common case where a script is used to build the original
+ // value.
+ template <typename OutputFunctor>
+ void RoundTripTest(const char* source, const OutputFunctor& output_functor) {
+ RoundTripTest([this, source]() { return EvaluateScriptForInput(source); },
+ output_functor);
+ }
+
+ Maybe<std::vector<uint8_t>> DoEncode(Local<Value> value) {
+ // This approximates what the API implementation would do.
+ // TODO(jbroman): Use the public API once it exists.
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueSerializer serializer(internal_isolate);
+ serializer.WriteHeader();
+ if (serializer.WriteObject(Utils::OpenHandle(*value)).FromMaybe(false)) {
+ return Just(serializer.ReleaseBuffer());
+ }
+ if (internal_isolate->has_pending_exception()) {
+ internal_isolate->OptionalRescheduleException(true);
+ }
+ return Nothing<std::vector<uint8_t>>();
+ }
+
+ template <typename InputFunctor, typename EncodedDataFunctor>
+ void EncodeTest(const InputFunctor& input_functor,
+ const EncodedDataFunctor& encoded_data_functor) {
+ Context::Scope scope(serialization_context());
+ TryCatch try_catch(isolate());
+ Local<Value> input_value = input_functor();
+ std::vector<uint8_t> buffer;
+ ASSERT_TRUE(DoEncode(input_value).To(&buffer));
+ ASSERT_FALSE(try_catch.HasCaught());
+ encoded_data_functor(buffer);
+ }
+
+ template <typename MessageFunctor>
+ void InvalidEncodeTest(const char* source, const MessageFunctor& functor) {
+ Context::Scope scope(serialization_context());
+ TryCatch try_catch(isolate());
+ Local<Value> input_value = EvaluateScriptForInput(source);
+ ASSERT_TRUE(DoEncode(input_value).IsNothing());
+ functor(try_catch.Message());
+ }
+
+ void InvalidEncodeTest(const char* source) {
+ InvalidEncodeTest(source, [](Local<Message>) {});
+ }
+
+ template <typename OutputFunctor>
+ void DecodeTest(const std::vector<uint8_t>& data,
+ const OutputFunctor& output_functor) {
+ Context::Scope scope(deserialization_context());
+ TryCatch try_catch(isolate());
+ // TODO(jbroman): Use the public API once it exists.
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueDeserializer deserializer(
+ internal_isolate,
+ i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
+ ASSERT_TRUE(deserializer.ReadHeader().FromMaybe(false));
+ Local<Value> result;
+ ASSERT_TRUE(ToLocal<Value>(deserializer.ReadObject(), &result));
+ ASSERT_FALSE(result.IsEmpty());
+ ASSERT_FALSE(try_catch.HasCaught());
+ ASSERT_TRUE(deserialization_context()
+ ->Global()
+ ->CreateDataProperty(deserialization_context_,
+ StringFromUtf8("result"), result)
+ .FromMaybe(false));
+ output_functor(result);
+ ASSERT_FALSE(try_catch.HasCaught());
+ }
+
+ template <typename OutputFunctor>
+ void DecodeTestForVersion0(const std::vector<uint8_t>& data,
+ const OutputFunctor& output_functor) {
+ Context::Scope scope(deserialization_context());
+ TryCatch try_catch(isolate());
+ // TODO(jbroman): Use the public API once it exists.
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueDeserializer deserializer(
+ internal_isolate,
+ i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
+ // TODO(jbroman): Enable legacy support.
+ ASSERT_TRUE(deserializer.ReadHeader().FromMaybe(false));
+ // TODO(jbroman): Check version 0.
+ Local<Value> result;
+ ASSERT_TRUE(ToLocal<Value>(
+ deserializer.ReadObjectUsingEntireBufferForLegacyFormat(), &result));
+ ASSERT_FALSE(result.IsEmpty());
+ ASSERT_FALSE(try_catch.HasCaught());
+ ASSERT_TRUE(deserialization_context()
+ ->Global()
+ ->CreateDataProperty(deserialization_context_,
+ StringFromUtf8("result"), result)
+ .FromMaybe(false));
+ output_functor(result);
+ ASSERT_FALSE(try_catch.HasCaught());
+ }
+
+ void InvalidDecodeTest(const std::vector<uint8_t>& data) {
+ Context::Scope scope(deserialization_context());
+ TryCatch try_catch(isolate());
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueDeserializer deserializer(
+ internal_isolate,
+ i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
+ Maybe<bool> header_result = deserializer.ReadHeader();
+ if (header_result.IsNothing()) return;
+ ASSERT_TRUE(header_result.ToChecked());
+ ASSERT_TRUE(deserializer.ReadObject().is_null());
+ }
+
+ Local<Value> EvaluateScriptForInput(const char* utf8_source) {
+ Local<String> source = StringFromUtf8(utf8_source);
+ Local<Script> script =
+ Script::Compile(serialization_context_, source).ToLocalChecked();
+ return script->Run(serialization_context_).ToLocalChecked();
+ }
+
+ bool EvaluateScriptForResultBool(const char* utf8_source) {
+ Local<String> source = StringFromUtf8(utf8_source);
+ Local<Script> script =
+ Script::Compile(deserialization_context_, source).ToLocalChecked();
+ Local<Value> value = script->Run(deserialization_context_).ToLocalChecked();
+ return value->BooleanValue(deserialization_context_).FromJust();
+ }
+
+ Local<String> StringFromUtf8(const char* source) {
+ return String::NewFromUtf8(isolate(), source, NewStringType::kNormal)
+ .ToLocalChecked();
+ }
+
+ static std::string Utf8Value(Local<Value> value) {
+ String::Utf8Value utf8(value);
+ return std::string(*utf8, utf8.length());
+ }
+
+ private:
+ Local<Context> serialization_context_;
+ Local<Context> deserialization_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueSerializerTest);
+};
+
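+// A rough key to the raw byte vectors used throughout these tests, inferred
+// from the vectors and their accompanying comments rather than taken from the
+// serializer itself: 0xff starts the header and is followed by the format
+// version as a varint (0x09 below). Most tags are ASCII mnemonics: '_'
+// undefined, 'T' true, 'F' false, '0' null, 'I' zig-zag int32, 'U' unsigned
+// varint, 'N' double, 'S' UTF-8 string, 'c' two-byte string, 'o'/'{' object
+// begin/end, 'A'/'$' dense array, 'a'/'@' sparse array, 'D' date, and '^' a
+// back reference to an already-seen object.
+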
+TEST_F(ValueSerializerTest, DecodeInvalid) {
+ // Version tag but no content.
+ InvalidDecodeTest({0xff});
+ // Version too large.
+ InvalidDecodeTest({0xff, 0x7f, 0x5f});
+ // Nonsense tag.
+ InvalidDecodeTest({0xff, 0x09, 0xdd});
+}
+
+TEST_F(ValueSerializerTest, RoundTripOddball) {
+ RoundTripTest([this]() { return Undefined(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ RoundTripTest([this]() { return True(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ RoundTripTest([this]() { return False(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ RoundTripTest([this]() { return Null(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+}
+
+TEST_F(ValueSerializerTest, DecodeOddball) {
+ // What this code is expected to generate.
+ DecodeTest({0xff, 0x09, 0x5f},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ DecodeTest({0xff, 0x09, 0x54},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ DecodeTest({0xff, 0x09, 0x46},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ DecodeTest({0xff, 0x09, 0x30},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+
+ // What v9 of the Blink code generates.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x5f, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x54, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x46, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x30, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+
+ // v0 (with no explicit version).
+ DecodeTest({0x5f, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ DecodeTest({0x54, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ DecodeTest({0x46, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ DecodeTest({0x30, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+}
+
+TEST_F(ValueSerializerTest, RoundTripNumber) {
+ RoundTripTest([this]() { return Integer::New(isolate(), 42); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
+ RoundTripTest([this]() { return Integer::New(isolate(), -31337); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(-31337, Int32::Cast(*value)->Value());
+ });
+ RoundTripTest(
+ [this]() {
+ return Integer::New(isolate(), std::numeric_limits<int32_t>::min());
+ },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(),
+ Int32::Cast(*value)->Value());
+ });
+ RoundTripTest([this]() { return Number::New(isolate(), -0.25); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+ });
+ RoundTripTest(
+ [this]() {
+ return Number::New(isolate(), std::numeric_limits<double>::quiet_NaN());
+ },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeNumber) {
+ // 42 zig-zag encoded (signed)
+ DecodeTest({0xff, 0x09, 0x49, 0x54},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
+ // 42 varint encoded (unsigned)
+ DecodeTest({0xff, 0x09, 0x55, 0x2a},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
+ // 160 zig-zag encoded (signed)
+ DecodeTest({0xff, 0x09, 0x49, 0xc0, 0x02},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
+ // 160 varint encoded (unsigned)
+ DecodeTest({0xff, 0x09, 0x55, 0xa0, 0x01},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
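+  // For reference, the integer encodings above can be reproduced by hand:
+  // zig-zag maps a signed n to (n << 1) ^ (n >> 31), so 42 becomes 84 (0x54)
+  // and 160 becomes 320, which as a base-128 varint (least significant group
+  // first) is 0xc0 0x02. The unsigned varints are simply 0x2a and 0xa0 0x01.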
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ // IEEE 754 doubles, little-endian byte order
+ DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0xbf},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+ });
+ // quiet NaN
+ DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x7f},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
+ // signaling NaN
+ DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x7f},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
+#endif
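+  // For reference, the 8 bytes following the 0x4e ('N') tag above are the
+  // IEEE 754 double in little-endian byte order; -0.25 is
+  // 0xbfd0000000000000, hence the trailing 0xd0 0xbf.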
+ // TODO(jbroman): Equivalent test for big-endian machines.
+}
+
+// String constants (in UTF-8) used for string encoding tests.
+static const char kHelloString[] = "Hello";
+static const char kQuebecString[] = "\x51\x75\xC3\xA9\x62\x65\x63";
+static const char kEmojiString[] = "\xF0\x9F\x91\x8A";
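+// (The emoji is U+1F44A, which UTF-16 represents as the surrogate pair
+// 0xd83d 0xdc4a; that is what the two-byte decode test below expects.)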
+
+TEST_F(ValueSerializerTest, RoundTripString) {
+ RoundTripTest([this]() { return String::Empty(isolate()); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ // Inside ASCII.
+ RoundTripTest([this]() { return StringFromUtf8(kHelloString); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+ // Inside Latin-1 (i.e. one-byte string), but not ASCII.
+ RoundTripTest([this]() { return StringFromUtf8(kQuebecString); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+ // An emoji (decodes to two 16-bit chars).
+ RoundTripTest([this]() { return StringFromUtf8(kEmojiString); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeString) {
+ // Decoding the strings above from UTF-8.
+ DecodeTest({0xff, 0x09, 0x53, 0x00},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ DecodeTest({0xff, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x53, 0x07, 'Q', 'u', 0xc3, 0xa9, 'b', 'e', 'c'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x53, 0x04, 0xf0, 0x9f, 0x91, 0x8a},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
+
+// And from two-byte strings (endianness dependent).
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest({0xff, 0x09, 0x63, 0x00},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ DecodeTest({0xff, 0x09, 0x63, 0x0a, 'H', '\0', 'e', '\0', 'l', '\0', 'l',
+ '\0', 'o', '\0'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x63, 0x0c, 'Q', '\0', 'u', '\0', 0xe9, '\0', 'b',
+ '\0', 'e', '\0', 'c', '\0'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x63, 0x04, 0x3d, 0xd8, 0x4a, 0xdc},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
+#endif
+ // TODO(jbroman): The same for big-endian systems.
+}
+
+TEST_F(ValueSerializerTest, DecodeInvalidString) {
+ // UTF-8 string with too few bytes available.
+ InvalidDecodeTest({0xff, 0x09, 0x53, 0x10, 'v', '8'});
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ // Two-byte string with too few bytes available.
+ InvalidDecodeTest({0xff, 0x09, 0x63, 0x10, 'v', '\0', '8', '\0'});
+ // Two-byte string with an odd byte length.
+ InvalidDecodeTest({0xff, 0x09, 0x63, 0x03, 'v', '\0', '8'});
+#endif
+ // TODO(jbroman): The same for big-endian systems.
+}
+
+TEST_F(ValueSerializerTest, EncodeTwoByteStringUsesPadding) {
+ // As long as the output has a version that Blink expects to be able to read,
+ // we must respect its alignment requirements. It requires that two-byte
+ // characters be aligned.
+ EncodeTest(
+ [this]() {
+ // We need a string whose length will take two bytes to encode, so that
+ // a padding byte is needed to keep the characters aligned. The string
+ // must also have a two-byte character, so that it gets the two-byte
+ // encoding.
+ std::string string(200, ' ');
+ string += kEmojiString;
+ return StringFromUtf8(string.c_str());
+ },
+ [](const std::vector<uint8_t>& data) {
+ // This is a sufficient but not necessary condition to be aligned.
+ // Note that the third byte (0x00) is padding.
+ const uint8_t expected_prefix[] = {0xff, 0x09, 0x00, 0x63, 0x94, 0x03};
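+        // For reference, 0x63 ('c') tags a two-byte string and 0x94 0x03 is
+        // its varint-encoded byte length: 202 UTF-16 code units * 2 = 404.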
+ ASSERT_GT(data.size(), sizeof(expected_prefix) / sizeof(uint8_t));
+ EXPECT_TRUE(std::equal(std::begin(expected_prefix),
+ std::end(expected_prefix), data.begin()));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripDictionaryObject) {
+ // Empty object.
+ RoundTripTest("({})", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 0"));
+ });
+ // String key.
+ RoundTripTest("({ a: 42 })", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Integer key (treated as a string, but may be encoded differently).
+ RoundTripTest("({ 42: 'a' })", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Key order must be preserved.
+ RoundTripTest("({ x: 1, y: 2, a: 3 })", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
+ });
+ // A harder case of enumeration order.
+ // Indexes first, in order (but not 2^32 - 1, which is not an index), then the
+ // remaining (string) keys, in the order they were defined.
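+  // (Per the spec's ordinary [[OwnPropertyKeys]]: integer indices in
+  // [0, 2**32 - 2] come first in ascending order, then string keys in
+  // insertion order, which is why 4294967295 sorts with the strings.)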
+ RoundTripTest(
+ "({ a: 2, 0xFFFFFFFF: 1, 0xFFFFFFFE: 3, 1: 0 })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 2"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFF] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFE] === 3"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 0"));
+ });
+ // This detects a fairly subtle case: the object itself must be in the map
+ // before its properties are deserialized, so that references to it can be
+ // resolved.
+ RoundTripTest(
+ "(() => { var y = {}; y.self = y; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
+ // Empty object.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6f, 0x7b, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 0"));
+ });
+ // String key.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
+ 0x49, 0x54, 0x7b, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Integer key (treated as a string, but may be encoded differently).
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01, 0x53,
+ 0x01, 0x61, 0x7b, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Key order must be preserved.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x78, 0x3f, 0x01,
+ 0x49, 0x02, 0x3f, 0x01, 0x53, 0x01, 0x79, 0x3f, 0x01, 0x49, 0x04, 0x3f,
+ 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01, 0x49, 0x06, 0x7b, 0x03},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
+ });
+ // A harder case of enumeration order.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x49, 0x02, 0x3f, 0x01,
+ 0x49, 0x00, 0x3f, 0x01, 0x55, 0xfe, 0xff, 0xff, 0xff, 0x0f, 0x3f,
+ 0x01, 0x49, 0x06, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01, 0x49,
+ 0x04, 0x3f, 0x01, 0x53, 0x0a, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36,
+ 0x37, 0x32, 0x39, 0x35, 0x3f, 0x01, 0x49, 0x02, 0x7b, 0x04},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 2"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFF] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFE] === 3"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 0"));
+ });
+ // This detects a fairly subtle case: the object itself must be in the map
+ // before its properties are deserialized, so that references to it can be
+ // resolved.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x04, 0x73,
+ 0x65, 0x6c, 0x66, 0x3f, 0x01, 0x5e, 0x00, 0x7b, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripOnlyOwnEnumerableStringKeys) {
+ // Only "own" properties should be serialized, not ones on the prototype.
+ RoundTripTest("(() => { var x = {}; x.__proto__ = {a: 4}; return x; })()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('a' in result)"));
+ });
+ // Only enumerable properties should be serialized.
+ RoundTripTest(
+ "(() => {"
+ " var x = {};"
+ " Object.defineProperty(x, 'a', {value: 1, enumerable: false});"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('a' in result)"));
+ });
+ // Symbol keys should not be serialized.
+ RoundTripTest("({ [Symbol()]: 4 })", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertySymbols(result).length === 0"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripTrickyGetters) {
+  // Keys are enumerated before any getters are called, but if there is no own
+  // property when the value is to be read, then it should not be serialized.
+ RoundTripTest("({ get a() { delete this.b; return 1; }, b: 2 })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // Keys added after the property enumeration should not be serialized.
+ RoundTripTest("({ get a() { this.b = 3; }})", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+  // If you remove a key and add it back, that's fine, but it will appear in
+  // its original place in enumeration order.
+ RoundTripTest(
+ "({ get a() { delete this.b; this.b = 4; }, b: 2, c: 3 })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'a,b,c'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.b === 4"));
+ });
+ // Similarly, it only matters if a property was enumerable when the
+ // enumeration happened.
+ RoundTripTest(
+ "({ get a() {"
+ " Object.defineProperty(this, 'b', {value: 2, enumerable: false});"
+ "}, b: 1})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.b === 2"));
+ });
+ RoundTripTest(
+ "(() => {"
+ " var x = {"
+ " get a() {"
+ " Object.defineProperty(this, 'b', {value: 2, enumerable: true});"
+ " }"
+ " };"
+ " Object.defineProperty(x, 'b',"
+ " {value: 1, enumerable: false, configurable: true});"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // The property also should not be read if it can only be found on the
+ // prototype chain (but not as an own property) after enumeration.
+ RoundTripTest(
+ "(() => {"
+ " var x = { get a() { delete this.b; }, b: 1 };"
+ " x.__proto__ = { b: 0 };"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // If an exception is thrown by script, encoding must fail and the exception
+ // must be thrown.
+ InvalidEncodeTest("({ get a() { throw new Error('sentinel'); } })",
+ [](Local<Message> message) {
+ ASSERT_FALSE(message.IsEmpty());
+ EXPECT_NE(std::string::npos,
+ Utf8Value(message->Get()).find("sentinel"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
+ // Empty object.
+ DecodeTestForVersion0(
+ {0x7b, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 0"));
+ });
+ // String key.
+ DecodeTestForVersion0(
+ {0x53, 0x01, 0x61, 0x49, 0x54, 0x7b, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Integer key (treated as a string, but may be encoded differently).
+ DecodeTestForVersion0(
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x7b, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Key order must be preserved.
+ DecodeTestForVersion0(
+ {0x53, 0x01, 0x78, 0x49, 0x02, 0x53, 0x01, 0x79, 0x49, 0x04, 0x53, 0x01,
+ 0x61, 0x49, 0x06, 0x7b, 0x03, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
+ });
+ // A property and an element.
+ DecodeTestForVersion0(
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x53, 0x01, 0x61, 0x49, 0x54, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === '42,a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArray) {
+ // A simple array of integers.
+ RoundTripTest("[1, 2, 3, 4, 5]", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Array.prototype"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === '1,2,3,4,5'"));
+ });
+ // A long (sparse) array.
+ RoundTripTest(
+ "(() => { var x = new Array(1000); x[500] = 42; return x; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
+ });
+ // Duplicate reference.
+ RoundTripTest(
+ "(() => { var y = {}; return [y, y]; })()", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
+ });
+ // Duplicate reference in a sparse array.
+ RoundTripTest(
+ "(() => { var x = new Array(1000); x[1] = x[500] = {}; return x; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'object'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
+ });
+ // Self reference.
+ RoundTripTest(
+ "(() => { var y = []; y[0] = y; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
+ });
+ // Self reference in a sparse array.
+ RoundTripTest(
+ "(() => { var y = new Array(1000); y[519] = y; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
+ });
+ // Array with additional properties.
+ RoundTripTest(
+ "(() => { var y = [1, 2]; y.foo = 'bar'; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // Sparse array with additional properties.
+ RoundTripTest(
+ "(() => { var y = new Array(1000); y.foo = 'bar'; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === ','.repeat(999)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // The distinction between holes and undefined elements must be maintained.
+ RoundTripTest("[,undefined]", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(0)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(1)"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeArray) {
+ // A simple array of integers.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x41, 0x05, 0x3f, 0x01, 0x49, 0x02,
+ 0x3f, 0x01, 0x49, 0x04, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01,
+ 0x49, 0x08, 0x3f, 0x01, 0x49, 0x0a, 0x24, 0x00, 0x05, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Array.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '1,2,3,4,5'"));
+ });
+ // A long (sparse) array.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
+ 0xe8, 0x07, 0x3f, 0x01, 0x49, 0x54, 0x40, 0x01, 0xe8, 0x07},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
+ });
+ // Duplicate reference.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x41, 0x02, 0x3f, 0x01, 0x6f, 0x7b, 0x00, 0x3f,
+ 0x02, 0x5e, 0x01, 0x24, 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
+ });
+ // Duplicate reference in a sparse array.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
+ 0x02, 0x3f, 0x01, 0x6f, 0x7b, 0x00, 0x3f, 0x02, 0x49, 0xe8,
+ 0x07, 0x3f, 0x02, 0x5e, 0x01, 0x40, 0x02, 0xe8, 0x07, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'object'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
+ });
+ // Self reference.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x41, 0x01, 0x3f, 0x01, 0x5e, 0x00, 0x24,
+ 0x00, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
+ });
+ // Self reference in a sparse array.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
+ 0x8e, 0x08, 0x3f, 0x01, 0x5e, 0x00, 0x40, 0x01, 0xe8, 0x07},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
+ });
+ // Array with additional properties.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x41, 0x02, 0x3f, 0x01, 0x49, 0x02, 0x3f,
+ 0x01, 0x49, 0x04, 0x3f, 0x01, 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x3f,
+ 0x01, 0x53, 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // Sparse array with additional properties.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01,
+ 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x3f, 0x01, 0x53, 0x03,
+ 0x62, 0x61, 0x72, 0x40, 0x01, 0xe8, 0x07, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === ','.repeat(999)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // The distinction between holes and undefined elements must be maintained.
+ // Note that since the previous output from Chrome fails this test, an
+ // encoding using the sparse format was constructed instead.
+ DecodeTest(
+ {0xff, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5f, 0x40, 0x01, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(0)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(1)"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArrayWithNonEnumerableElement) {
+ // Even though this array looks like [1,5,3], the 5 should be missing from the
+ // perspective of structured clone, which only clones properties that were
+ // enumerable.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1,2,3];"
+ " Object.defineProperty(x, '1', {enumerable:false, value:5});"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty('1')"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
+ // If an element is deleted before it is serialized, then it's deleted.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; }}, 42];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; }}, 42];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
+ });
+ // If the length is changed, then the resulting array still has the original
+ // length, but elements that were not yet serialized are gone.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, { get a() { x.length = 0; }}, 3, 4];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(4, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, { get a() { x.length = 0; }}, 3, 4];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
+ });
+ // If a getter makes a property non-enumerable, it should still be enumerated
+ // as enumeration happens once before getters are invoked.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() {"
+ " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
+ " }}, 2];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() {"
+ " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
+ " }}, 2];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
+ });
+ // Getters on the array itself must also run.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, 2, 3];"
+ " Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, 2, 3];"
+ " Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
+ });
+ // Even with a getter that deletes things, we don't read from the prototype.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; } }, 2];"
+ " x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; } }, 2];"
+ " x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
+ // Empty (sparse) array.
+ DecodeTestForVersion0({0x40, 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(0, Array::Cast(*value)->Length());
+ });
+ // Sparse array with a mixture of elements and properties.
+ DecodeTestForVersion0(
+ {0x55, 0x00, 0x53, 0x01, 'a', 0x55, 0x02, 0x55, 0x05, 0x53,
+ 0x03, 'f', 'o', 'o', 0x53, 0x03, 'b', 'a', 'r', 0x53,
+ 0x03, 'b', 'a', 'z', 0x49, 0x0b, 0x40, 0x04, 0x03, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === 'a,,5'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.baz === -6"));
+ });
+ // Sparse array in a sparse array (sanity check of nesting).
+ DecodeTestForVersion0(
+ {0x55, 0x01, 0x55, 0x01, 0x54, 0x40, 0x01, 0x02, 0x40, 0x01, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] instanceof Array"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result[1])"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1][1] === true"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripDate) {
+ RoundTripTest("new Date(1e6)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ });
+ RoundTripTest("new Date(Date.UTC(1867, 6, 1))", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+ RoundTripTest("new Date(NaN)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
+ RoundTripTest(
+ "({ a: new Date(), get b() { return this.a; } })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeDate) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84,
+ 0x2e, 0x41, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89, 0x87,
+ 0xc2, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf8, 0x7f, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
+#else
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x41, 0x2e, 0x84, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x44, 0xc2, 0x87, 0x89, 0x27, 0x45, 0x20, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
+#endif
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f,
+ 0x01, 0x44, 0x00, 0x20, 0x39, 0x50, 0x37, 0x6a, 0x75, 0x42, 0x3f,
+ 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripValueObjects) {
+ RoundTripTest("new Boolean(true)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
+ });
+ RoundTripTest("new Boolean(false)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
+ });
+ RoundTripTest(
+ "({ a: new Boolean(true), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+ RoundTripTest("new Number(-42)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
+ });
+ RoundTripTest("new Number(NaN)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("Number.isNaN(result.valueOf())"));
+ });
+ RoundTripTest(
+ "({ a: new Number(6), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+ RoundTripTest("new String('Qu\\xe9bec')", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.valueOf() === 'Qu\\xe9bec'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
+ });
+ RoundTripTest("new String('\\ud83d\\udc4a')", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.valueOf() === '\\ud83d\\udc4a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
+ });
+ RoundTripTest(
+ "({ a: new String(), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RejectsOtherValueObjects) {
+ // This is a roundabout way of getting an instance of Symbol.
+ InvalidEncodeTest("Object.valueOf.apply(Symbol())");
+}
+
+TEST_F(ValueSerializerTest, DecodeValueObjects) {
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x79, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x78, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
+ 0x79, 0x3f, 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45,
+ 0xc0, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf8, 0x7f, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Number.isNaN(result.valueOf())"));
+ });
+#else
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6e, 0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6e, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Number.isNaN(result.valueOf())"));
+ });
+#endif
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f,
+ 0x01, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x40, 0x3f,
+ 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x73, 0x07, 0x51, 0x75, 0xc3, 0xa9, 0x62,
+ 0x65, 0x63, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.valueOf() === 'Qu\\xe9bec'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x73, 0x04, 0xf0, 0x9f, 0x91, 0x8a},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.valueOf() === '\\ud83d\\udc4a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01,
+ 0x61, 0x3f, 0x01, 0x73, 0x00, 0x3f, 0x02, 0x53, 0x01,
+ 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripRegExp) {
+ RoundTripTest("/foo/g", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '/foo/g'"));
+ });
+ RoundTripTest("new RegExp('Qu\\xe9bec', 'i')", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === '/Qu\\xe9bec/i'"));
+ });
+ RoundTripTest("new RegExp('\\ud83d\\udc4a', 'ug')",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/\\ud83d\\udc4a/gu'"));
+ });
+ RoundTripTest(
+ "({ a: /foo/gi, get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeRegExp) {
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/foo/g'"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x07, 0x51, 0x75, 0xc3, 0xa9, 0x62,
+ 0x65, 0x63, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/Qu\\xe9bec/i'"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x52, 0x04, 0xf0, 0x9f, 0x91, 0x8a, 0x11, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/\\ud83d\\udc4a/gu'"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61,
+ 0x3f, 0x01, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x03, 0x3f, 0x02,
+ 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+} // namespace
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
index c2abc8a6ad..eda8deabfd 100644
--- a/deps/v8/test/unittests/wasm/OWNERS
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -1,3 +1,5 @@
-titzer@chromium.org
-bradnelson@chromium.org
ahaas@chromium.org
+bradnelson@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/test/unittests/wasm/asm-types-unittest.cc b/deps/v8/test/unittests/wasm/asm-types-unittest.cc
new file mode 100644
index 0000000000..36493df86d
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/asm-types-unittest.cc
@@ -0,0 +1,723 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/asmjs/asm-types.h"
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "src/base/macros.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace {
+
+using ::testing::StrEq;
+
+class AsmTypeTest : public TestWithZone {
+ public:
+ using Type = AsmType;
+
+ AsmTypeTest()
+ : parents_({
+ {Type::Uint8Array(), {Type::Heap()}},
+ {Type::Int8Array(), {Type::Heap()}},
+ {Type::Uint16Array(), {Type::Heap()}},
+ {Type::Int16Array(), {Type::Heap()}},
+ {Type::Uint32Array(), {Type::Heap()}},
+ {Type::Int32Array(), {Type::Heap()}},
+ {Type::Float32Array(), {Type::Heap()}},
+ {Type::Float64Array(), {Type::Heap()}},
+ {Type::Float(),
+ {Type::FloatishDoubleQ(), Type::FloatQDoubleQ(), Type::FloatQ(),
+ Type::Floatish()}},
+ {Type::Floatish(), {Type::FloatishDoubleQ()}},
+ {Type::FloatQ(),
+ {Type::FloatishDoubleQ(), Type::FloatQDoubleQ(),
+ Type::Floatish()}},
+ {Type::FixNum(),
+ {Type::Signed(), Type::Extern(), Type::Unsigned(), Type::Int(),
+ Type::Intish()}},
+ {Type::Unsigned(), {Type::Int(), Type::Intish()}},
+ {Type::Signed(), {Type::Extern(), Type::Int(), Type::Intish()}},
+ {Type::Int(), {Type::Intish()}},
+ {Type::DoubleQ(), {Type::FloatishDoubleQ(), Type::FloatQDoubleQ()}},
+ {Type::Double(),
+ {Type::FloatishDoubleQ(), Type::FloatQDoubleQ(), Type::DoubleQ(),
+ Type::Extern()}},
+ }) {}
+
+ protected:
+ std::unordered_set<Type*> ParentsOf(Type* derived) const {
+ const auto parents_iter = parents_.find(derived);
+ if (parents_iter == parents_.end()) {
+ return std::unordered_set<Type*>();
+ }
+ return parents_iter->second;
+ }
+
+ class FunctionTypeBuilder {
+ public:
+ FunctionTypeBuilder(FunctionTypeBuilder&& b)
+ : function_type_(b.function_type_) {
+ b.function_type_ = nullptr;
+ }
+
+ FunctionTypeBuilder& operator=(FunctionTypeBuilder&& b) {
+ if (this != &b) {
+ function_type_ = b.function_type_;
+ b.function_type_ = nullptr;
+ }
+ return *this;
+ }
+
+ FunctionTypeBuilder(Zone* zone, Type* return_type)
+ : function_type_(Type::Function(zone, return_type)) {}
+
+ private:
+ static void AddAllArguments(AsmFunctionType*) {}
+
+ template <typename Arg, typename... Others>
+ static void AddAllArguments(AsmFunctionType* function_type, Arg* arg,
+ Others... others) {
+ CHECK(function_type != nullptr);
+ function_type->AddArgument((*arg)());
+ AddAllArguments(function_type, others...);
+ }
+
+ public:
+ template <typename... Args>
+ Type* operator()(Args... args) {
+ Type* ret = function_type_;
+ function_type_ = nullptr;
+ AddAllArguments(ret->AsFunctionType(), args...);
+ return ret;
+ }
+
+ private:
+ Type* function_type_;
+ };
+
+ FunctionTypeBuilder Function(Type* (*return_type)()) {
+ return FunctionTypeBuilder(zone(), (*return_type)());
+ }
+
+ template <typename... Overloads>
+ Type* Overload(Overloads... overloads) {
+ auto* ret = Type::OverloadedFunction(zone());
+ AddAllOverloads(ret->AsOverloadedFunctionType(), overloads...);
+ return ret;
+ }
+
+ private:
+ static void AddAllOverloads(AsmOverloadedFunctionType*) {}
+
+ template <typename Overload, typename... Others>
+ static void AddAllOverloads(AsmOverloadedFunctionType* function,
+ Overload* overload, Others... others) {
+ CHECK(function != nullptr);
+ function->AddOverload(overload);
+ AddAllOverloads(function, others...);
+ }
+
+ const std::unordered_map<Type*, std::unordered_set<Type*>> parents_;
+};
+
+// AsmValueTypeParents exposes the bitmasks for the parents of each value type
+// in asm's type system. It inherits from AsmValueType so that the kAsm<Foo>
+// members are available when expanding the FOR_EACH_ASM_VALUE_TYPE_LIST macro.
+class AsmValueTypeParents : private AsmValueType {
+ public:
+ enum : uint32_t {
+#define V(CamelName, string_name, number, parent_types) \
+ CamelName = parent_types,
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueTypeParents);
+};
+
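+// As a rough illustration (the entry below is hypothetical, not copied from
+// the real FOR_EACH_ASM_VALUE_TYPE_LIST), a list entry such as
+//   V(Double, "double", 9, kAsmDoubleQ | kAsmExtern)
+// would expand inside the enum above to
+//   Double = kAsmDoubleQ | kAsmExtern,
+// i.e. each enumerator carries only the parent bitmask of its value type.
+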
+TEST_F(AsmTypeTest, ValidateBits) {
+ // Generic validation tests for the bits in the type system's type
+ // definitions.
+
+ std::unordered_set<Type*> seen_types;
+ std::unordered_set<uint32_t> seen_numbers;
+ uint32_t total_types = 0;
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ ++total_types; \
+ if (AsmValueTypeParents::CamelName != 0) { \
+ EXPECT_NE(0, ParentsOf(AsmType::CamelName()).size()) << #CamelName; \
+ } \
+ seen_types.insert(Type::CamelName()); \
+ seen_numbers.insert(number); \
+ /* Every ASM type must have a valid number. */ \
+ EXPECT_NE(0, number) << Type::CamelName()->Name(); \
+ /* Inheritance cycles - unlikely, but we're paranoid and check for it */ \
+ /* anyways.*/ \
+ EXPECT_EQ(0, (1 << (number)) & AsmValueTypeParents::CamelName); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+
+ // At least one type was expanded.
+ EXPECT_GT(total_types, 0u);
+
+ // Each value type is unique.
+ EXPECT_EQ(total_types, seen_types.size());
+
+ // Each number is unique.
+ EXPECT_EQ(total_types, seen_numbers.size());
+}
+
+TEST_F(AsmTypeTest, SaneParentsMap) {
+  // This test ensures our parents map contains all the parent types that are
+ // specified in the types' declaration. It does not report bogus inheritance.
+
+  // Handy-dandy lambda for counting bits. Code borrowed from Stack Overflow.
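+  // (This is the standard SWAR population count: 2-bit sums, then 4-bit
+  // sums, then 8-bit sums, and a final multiply + shift that adds up the
+  // per-byte counts.)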
+ auto NumberOfSetBits = [](uintptr_t parent_mask) -> uint32_t {
+ uint32_t parent_mask32 = static_cast<uint32_t>(parent_mask);
+ CHECK_EQ(parent_mask, parent_mask32);
+ parent_mask32 = parent_mask32 - ((parent_mask32 >> 1) & 0x55555555);
+ parent_mask32 =
+ (parent_mask32 & 0x33333333) + ((parent_mask32 >> 2) & 0x33333333);
+ return (((parent_mask32 + (parent_mask32 >> 4)) & 0x0F0F0F0F) *
+ 0x01010101) >>
+ 24;
+ };
+
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ const uintptr_t parents = \
+ reinterpret_cast<uintptr_t>(Type::CamelName()) & ~(1 << (number)); \
+ EXPECT_EQ(NumberOfSetBits(parents), \
+ 1 + ParentsOf(Type::CamelName()).size()) \
+ << Type::CamelName()->Name() << ", parents " \
+ << reinterpret_cast<void*>(parents) << ", type " \
+ << static_cast<void*>(Type::CamelName()); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+}
+
+TEST_F(AsmTypeTest, Names) {
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ EXPECT_THAT(Type::CamelName()->Name(), StrEq(string_name)); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+
+ EXPECT_THAT(Function(Type::Int)(Type::Double, Type::Float)->Name(),
+ StrEq("(double, float) -> int"));
+
+ EXPECT_THAT(Overload(Function(Type::Int)(Type::Double, Type::Float),
+ Function(Type::Int)(Type::Int))
+ ->Name(),
+ StrEq("(double, float) -> int /\\ (int) -> int"));
+
+ EXPECT_THAT(Type::FroundType(zone())->Name(), StrEq("fround"));
+
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Signed(), Type::Int())->Name(),
+ StrEq("(int, int...) -> signed"));
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Float(), Type::Floatish())->Name(),
+ StrEq("(floatish, floatish...) -> float"));
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Double(), Type::DoubleQ())->Name(),
+ StrEq("(double?, double?...) -> double"));
+
+ EXPECT_THAT(Type::FFIType(zone())->Name(), StrEq("Function"));
+
+ auto* ft =
+ Type::FunctionTableType(zone(), 15, Function(Type::Double)(Type::Int));
+ EXPECT_THAT(ft->Name(), StrEq("((int) -> double)[15]"));
+}
+
+TEST_F(AsmTypeTest, IsExactly) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
+ EXPECT_EQ(ii == jj, test_types[ii]->IsExactly(test_types[jj]))
+ << test_types[ii]->Name()
+ << ((ii == jj) ? " is not exactly " : " is exactly ")
+ << test_types[jj]->Name();
+ }
+ }
+}
+
+bool FunctionsWithSameSignature(AsmType* a, AsmType* b) {
+ if (a->AsFunctionType()) {
+ if (b->AsFunctionType()) {
+ return a->IsA(b);
+ }
+ }
+ return false;
+}
+
+TEST_F(AsmTypeTest, IsA) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
+ const bool Expected =
+ (ii == jj) || ParentsOf(test_types[ii]).count(test_types[jj]) != 0 ||
+ FunctionsWithSameSignature(test_types[ii], test_types[jj]);
+ EXPECT_EQ(Expected, test_types[ii]->IsA(test_types[jj]))
+ << test_types[ii]->Name() << (Expected ? " is not a " : " is a ")
+ << test_types[jj]->Name();
+ }
+ }
+
+ EXPECT_TRUE(Function(Type::Int)(Type::Int, Type::Int)
+ ->IsA(Function(Type::Int)(Type::Int, Type::Int)));
+
+ EXPECT_FALSE(Function(Type::Int)(Type::Int, Type::Int)
+ ->IsA(Function(Type::Double)(Type::Int, Type::Int)));
+ EXPECT_FALSE(Function(Type::Int)(Type::Int, Type::Int)
+ ->IsA(Function(Type::Int)(Type::Double, Type::Int)));
+}
+
+TEST_F(AsmTypeTest, CanBeInvokedWith) {
+ auto* min_max_int = Type::MinMaxType(zone(), Type::Signed(), Type::Int());
+ auto* i2s = Function(Type::Signed)(Type::Int);
+ auto* ii2s = Function(Type::Signed)(Type::Int, Type::Int);
+ auto* iii2s = Function(Type::Signed)(Type::Int, Type::Int, Type::Int);
+ auto* iiii2s =
+ Function(Type::Signed)(Type::Int, Type::Int, Type::Int, Type::Int);
+
+ EXPECT_TRUE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ ii2s->AsFunctionType()->ReturnType(),
+ ii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ iii2s->AsFunctionType()->ReturnType(),
+ iii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ iiii2s->AsFunctionType()->ReturnType(),
+ iiii2s->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ i2s->AsFunctionType()->ReturnType(), i2s->AsFunctionType()->Arguments()));
+
+ auto* min_max_double =
+ Type::MinMaxType(zone(), Type::Double(), Type::Double());
+ auto* d2d = Function(Type::Double)(Type::Double);
+ auto* dd2d = Function(Type::Double)(Type::Double, Type::Double);
+ auto* ddd2d =
+ Function(Type::Double)(Type::Double, Type::Double, Type::Double);
+ auto* dddd2d = Function(Type::Double)(Type::Double, Type::Double,
+ Type::Double, Type::Double);
+ EXPECT_TRUE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ dd2d->AsFunctionType()->ReturnType(),
+ dd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ ddd2d->AsFunctionType()->ReturnType(),
+ ddd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ dddd2d->AsFunctionType()->ReturnType(),
+ dddd2d->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ d2d->AsFunctionType()->ReturnType(), d2d->AsFunctionType()->Arguments()));
+
+ auto* min_max = Overload(min_max_int, min_max_double);
+ EXPECT_FALSE(min_max->AsCallableType()->CanBeInvokedWith(
+ i2s->AsFunctionType()->ReturnType(), i2s->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(min_max->AsCallableType()->CanBeInvokedWith(
+ d2d->AsFunctionType()->ReturnType(), d2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ ii2s->AsFunctionType()->ReturnType(),
+ ii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ iii2s->AsFunctionType()->ReturnType(),
+ iii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ iiii2s->AsFunctionType()->ReturnType(),
+ iiii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ dd2d->AsFunctionType()->ReturnType(),
+ dd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ ddd2d->AsFunctionType()->ReturnType(),
+ ddd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ dddd2d->AsFunctionType()->ReturnType(),
+ dddd2d->AsFunctionType()->Arguments()));
+
+ auto* fround = Type::FroundType(zone());
+
+ ZoneVector<AsmType*> arg(zone());
+ arg.push_back(Type::Floatish());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::FloatQ());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Float());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::DoubleQ());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Double());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Signed());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Unsigned());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::FixNum());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+
+ auto* idf2v = Function(Type::Void)(Type::Int, Type::Double, Type::Float);
+ auto* i2d = Function(Type::Double)(Type::Int);
+ auto* i2f = Function(Type::Float)(Type::Int);
+ auto* fi2d = Function(Type::Double)(Type::Float, Type::Int);
+ auto* idif2i =
+ Function(Type::Int)(Type::Int, Type::Double, Type::Int, Type::Float);
+ auto* overload = Overload(idf2v, i2f, /*i2d missing, */ fi2d, idif2i);
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ idf2v->AsFunctionType()->ReturnType(),
+ idf2v->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ i2f->AsFunctionType()->ReturnType(), i2f->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ fi2d->AsFunctionType()->ReturnType(),
+ fi2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ idif2i->AsFunctionType()->ReturnType(),
+ idif2i->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(overload->AsCallableType()->CanBeInvokedWith(
+ i2d->AsFunctionType()->ReturnType(), i2d->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(i2f->AsCallableType()->CanBeInvokedWith(
+ i2d->AsFunctionType()->ReturnType(), i2d->AsFunctionType()->Arguments()));
+
+ auto* ffi = Type::FFIType(zone());
+ AsmType* (*kReturnTypes[])() = {
+ Type::Void, Type::Double, Type::Signed,
+ };
+ AsmType* (*kParameterTypes[])() = {
+ Type::Double, Type::Signed, Type::FixNum,
+ };
+ for (size_t ii = 0; ii < arraysize(kReturnTypes); ++ii) {
+ for (size_t jj = 0; jj < arraysize(kParameterTypes); ++jj) {
+ auto* f = Function(kReturnTypes[ii])(kParameterTypes[jj]);
+ EXPECT_TRUE(ffi->AsCallableType()->CanBeInvokedWith(
+ f->AsFunctionType()->ReturnType(), f->AsFunctionType()->Arguments()))
+ << kReturnTypes[ii]()->Name();
+
+      // Calling with a non-parameter type should fail.
+ f = Function(kReturnTypes[ii])(kParameterTypes[jj], Type::Int);
+ EXPECT_FALSE(ffi->AsCallableType()->CanBeInvokedWith(
+ f->AsFunctionType()->ReturnType(), f->AsFunctionType()->Arguments()))
+ << kReturnTypes[ii]()->Name();
+ }
+ }
+
+ auto* ft0 = Type::FunctionTableType(zone(), 10, fi2d);
+ EXPECT_TRUE(ft0->AsCallableType()->CanBeInvokedWith(
+ fi2d->AsFunctionType()->ReturnType(),
+ fi2d->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(ft0->AsCallableType()->CanBeInvokedWith(
+ i2d->AsFunctionType()->ReturnType(), i2d->AsFunctionType()->Arguments()));
+}
+
+TEST_F(AsmTypeTest, ToReturnType) {
+ std::unordered_map<AsmType*, AsmType*> kToReturnType = {
+ {Type::Signed(), Type::Signed()}, {Type::FixNum(), Type::Signed()},
+ {Type::Double(), Type::Double()}, {Type::Float(), Type::Float()},
+ {Type::Void(), Type::Void()},
+ };
+
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ auto* return_type = Type::None();
+ auto to_return_type_iter = kToReturnType.find(test_types[ii]);
+ if (to_return_type_iter != kToReturnType.end()) {
+ return_type = to_return_type_iter->second;
+ }
+ EXPECT_EQ(return_type, test_types[ii]->ToReturnType())
+ << return_type->Name() << " != " << test_types[ii]->ToReturnType();
+ }
+}
+
+TEST_F(AsmTypeTest, IsReturnType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> return_types{
+ Type::Double(), Type::Signed(), Type::Float(), Type::Void(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsReturnType = return_types.count(test_types[ii]);
+ EXPECT_EQ(IsReturnType, test_types[ii]->IsReturnType())
+ << test_types[ii]->Name()
+ << (IsReturnType ? " is not a return type" : " is a return type");
+ }
+}
+
+TEST_F(AsmTypeTest, ToParameterType) {
+ std::unordered_map<AsmType*, AsmType*> kToParameterType = {
+ {Type::Int(), Type::Int()}, {Type::Signed(), Type::Int()},
+ {Type::Unsigned(), Type::Int()}, {Type::FixNum(), Type::Int()},
+ {Type::Double(), Type::Double()}, {Type::Float(), Type::Float()},
+ };
+
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ auto* parameter_type = Type::None();
+ auto to_parameter_type_iter = kToParameterType.find(test_types[ii]);
+ if (to_parameter_type_iter != kToParameterType.end()) {
+ parameter_type = to_parameter_type_iter->second;
+ }
+ EXPECT_EQ(parameter_type, test_types[ii]->ToParameterType())
+ << parameter_type->Name()
+ << " != " << test_types[ii]->ToParameterType();
+ }
+}
+
+TEST_F(AsmTypeTest, IsParameterType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> parameter_types{
+ Type::Double(), Type::Int(), Type::Float(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsParameterType = parameter_types.count(test_types[ii]);
+ EXPECT_EQ(IsParameterType, test_types[ii]->IsParameterType())
+ << test_types[ii]->Name()
+ << (IsParameterType ? " is not a parameter type"
+ : " is a parameter type");
+ }
+}
+
+TEST_F(AsmTypeTest, IsComparableType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> comparable_types{
+ Type::Double(), Type::Signed(), Type::Unsigned(), Type::Float(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsComparableType = comparable_types.count(test_types[ii]);
+ EXPECT_EQ(IsComparableType, test_types[ii]->IsComparableType())
+ << test_types[ii]->Name()
+ << (IsComparableType ? " is not a comparable type"
+ : " is a comparable type");
+ }
+}
+
+TEST_F(AsmTypeTest, ElementSizeInBytes) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto ElementSizeInBytesForType = [](Type* type) -> int32_t {
+ if (type == Type::Int8Array() || type == Type::Uint8Array()) {
+ return 1;
+ }
+ if (type == Type::Int16Array() || type == Type::Uint16Array()) {
+ return 2;
+ }
+ if (type == Type::Int32Array() || type == Type::Uint32Array() ||
+ type == Type::Float32Array()) {
+ return 4;
+ }
+ if (type == Type::Float64Array()) {
+ return 8;
+ }
+ return -1;
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(ElementSizeInBytesForType(test_types[ii]),
+ test_types[ii]->ElementSizeInBytes());
+ }
+}
+
+TEST_F(AsmTypeTest, LoadType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto LoadTypeForType = [](Type* type) -> Type* {
+ if (type == Type::Int8Array() || type == Type::Uint8Array() ||
+ type == Type::Int16Array() || type == Type::Uint16Array() ||
+ type == Type::Int32Array() || type == Type::Uint32Array()) {
+ return Type::Intish();
+ }
+
+ if (type == Type::Float32Array()) {
+ return Type::FloatQ();
+ }
+
+ if (type == Type::Float64Array()) {
+ return Type::DoubleQ();
+ }
+
+ return Type::None();
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(LoadTypeForType(test_types[ii]), test_types[ii]->LoadType());
+ }
+}
+
+TEST_F(AsmTypeTest, StoreType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto StoreTypeForType = [](Type* type) -> Type* {
+ if (type == Type::Int8Array() || type == Type::Uint8Array() ||
+ type == Type::Int16Array() || type == Type::Uint16Array() ||
+ type == Type::Int32Array() || type == Type::Uint32Array()) {
+ return Type::Intish();
+ }
+
+ if (type == Type::Float32Array()) {
+ return Type::FloatishDoubleQ();
+ }
+
+ if (type == Type::Float64Array()) {
+ return Type::FloatQDoubleQ();
+ }
+
+ return Type::None();
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(StoreTypeForType(test_types[ii]), test_types[ii]->StoreType())
+ << test_types[ii]->Name();
+ }
+}
+
+} // namespace
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
index 0b1b79ea86..7311f063a0 100644
--- a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -18,9 +18,13 @@ namespace v8 {
namespace internal {
namespace wasm {
+#define B1(a) kExprBlock, a, kExprEnd
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
+
static const byte kCodeGetLocal0[] = {kExprGetLocal, 0};
static const byte kCodeGetLocal1[] = {kExprGetLocal, 1};
-static const byte kCodeSetLocal0[] = {kExprSetLocal, 0, kExprI8Const, 0};
+static const byte kCodeSetLocal0[] = {WASM_SET_LOCAL(0, WASM_ZERO)};
static const LocalType kLocalTypes[] = {kAstI32, kAstI64, kAstF32, kAstF64};
static const MachineType machineTypes[] = {
@@ -36,7 +40,7 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
kExprI32LeS, kExprI32LtU, kExprI32LeU};
#define WASM_BRV_IF_ZERO(depth, val) \
- kExprBrIf, static_cast<byte>(depth), val, WASM_ZERO
+ val, WASM_ZERO, kExprBrIf, ARITY_1, static_cast<byte>(depth)
#define EXPECT_VERIFIES(env, x) Verify(kSuccess, env, x, x + arraysize(x))
@@ -48,7 +52,6 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
Verify(kSuccess, env, code, code + arraysize(code)); \
} while (false)
-
#define EXPECT_FAILURE_INLINE(env, ...) \
do { \
static byte code[] = {__VA_ARGS__}; \
@@ -65,7 +68,8 @@ class AstDecoderTest : public TestWithZone {
public:
typedef std::pair<uint32_t, LocalType> LocalsDecl;
- AstDecoderTest() : module(nullptr) {}
+ AstDecoderTest() : module(nullptr), local_decls(zone()) {}
+
TestSignatures sigs;
ModuleEnv* module;
LocalDeclEncoder local_decls;
@@ -74,13 +78,13 @@ class AstDecoderTest : public TestWithZone {
local_decls.AddLocals(count, type);
}
- // Preprends local variable declarations and renders nice error messages for
+ // Prepends local variable declarations and renders nice error messages for
// verification failures.
void Verify(ErrorCode expected, FunctionSig* sig, const byte* start,
const byte* end) {
- local_decls.Prepend(&start, &end);
+ local_decls.Prepend(zone(), &start, &end);
// Verify the code.
- TreeResult result =
+ DecodeResult result =
VerifyWasmCode(zone()->allocator(), module, sig, start, end);
if (result.error_code != expected) {
@@ -101,14 +105,11 @@ class AstDecoderTest : public TestWithZone {
}
FATAL(str.str().c_str());
}
-
- delete[] start; // local_decls.Prepend() allocated a new buffer.
}
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
// op(local[0], local[1])
- byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
- 1};
+ byte code[] = {WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
EXPECT_VERIFIES(success, code);
// Try all combinations of return and parameter types.
@@ -134,7 +135,7 @@ class AstDecoderTest : public TestWithZone {
void TestUnop(WasmOpcode opcode, LocalType ret_type, LocalType param_type) {
// Return(op(local[0]))
- byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
+ byte code[] = {WASM_UNOP(opcode, WASM_GET_LOCAL(0))};
{
LocalType types[] = {ret_type, param_type};
FunctionSig sig(1, 1, types);
@@ -155,7 +156,6 @@ class AstDecoderTest : public TestWithZone {
}
};
-
TEST_F(AstDecoderTest, Int8Const) {
byte code[] = {kExprI8Const, 0};
for (int i = -128; i < 128; i++) {
@@ -176,12 +176,6 @@ TEST_F(AstDecoderTest, IncompleteIf1) {
EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IncompleteIf2) {
- byte code[] = {kExprIf, kExprI8Const, 0};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
-}
-
TEST_F(AstDecoderTest, Int8Const_fallthru) {
byte code[] = {kExprI8Const, 0, kExprI8Const, 1};
EXPECT_VERIFIES(sigs.i_i(), code);
@@ -303,199 +297,294 @@ TEST_F(AstDecoderTest, Binops_off_end) {
EXPECT_FAILURE(sigs.i_i(), code1);
}
- byte code3[] = {0, kExprGetLocal, 0}; // [opcode] [expr]
+ byte code3[] = {kExprGetLocal, 0, 0}; // [expr] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
- code3[0] = kInt32BinopOpcodes[i];
+ code3[2] = kInt32BinopOpcodes[i];
EXPECT_FAILURE(sigs.i_i(), code3);
}
- byte code4[] = {0, kExprGetLocal, 0, 0}; // [opcode] [expr] [opcode]
+ byte code4[] = {kExprGetLocal, 0, 0, 0}; // [expr] [opcode] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
- code4[0] = kInt32BinopOpcodes[i];
+ code4[2] = kInt32BinopOpcodes[i];
code4[3] = kInt32BinopOpcodes[i];
EXPECT_FAILURE(sigs.i_i(), code4);
}
}
+TEST_F(AstDecoderTest, BinopsAcrossBlock1) {
+ static const byte code[] = {WASM_ZERO, kExprBlock, WASM_ZERO, kExprI32Add,
+ kExprEnd};
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, BinopsAcrossBlock2) {
+ static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprBlock, kExprI32Add,
+ kExprEnd};
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, BinopsAcrossBlock3) {
+ static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprIf, kExprI32Add,
+ kExprElse, kExprI32Add, kExprEnd};
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
-//===================================================================
-//== Statements
-//===================================================================
TEST_F(AstDecoderTest, Nop) {
static const byte code[] = {kExprNop};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, SetLocal0_param) {
- static const byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES(sigs.i_i(), kCodeSetLocal0);
+ EXPECT_FAILURE(sigs.f_ff(), kCodeSetLocal0);
+ EXPECT_FAILURE(sigs.d_dd(), kCodeSetLocal0);
}
TEST_F(AstDecoderTest, SetLocal0_local) {
- byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_FAILURE(sigs.i_v(), kCodeSetLocal0);
AddLocals(kAstI32, 1);
- EXPECT_VERIFIES(sigs.i_v(), code);
+ EXPECT_VERIFIES(sigs.i_v(), kCodeSetLocal0);
}
TEST_F(AstDecoderTest, SetLocalN_local) {
for (byte i = 1; i < 8; i++) {
AddLocals(kAstI32, 1);
for (byte j = 0; j < i; j++) {
- byte code[] = {kExprSetLocal, j, kExprI8Const, i};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_SET_LOCAL(j, WASM_I8(i)));
}
}
}
+TEST_F(AstDecoderTest, BlockN) {
+ const int kMaxSize = 200;
+ byte buffer[kMaxSize + 2];
+
+ for (int i = 0; i <= kMaxSize; i++) {
+ memset(buffer, kExprNop, sizeof(buffer));
+ buffer[0] = kExprBlock;
+ buffer[i + 1] = kExprEnd;
+ Verify(kSuccess, sigs.v_i(), buffer, buffer + i + 2);
+ }
+}
+
TEST_F(AstDecoderTest, Block0) {
- static const byte code[] = {kExprBlock, 0};
+ static const byte code[] = {kExprBlock, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
TEST_F(AstDecoderTest, Block0_fallthru1) {
- static const byte code[] = {kExprBlock, 0, kExprBlock, 0};
+ static const byte code[] = {kExprBlock, kExprBlock, kExprEnd, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, Block0Block0) {
+ static const byte code[] = {kExprBlock, kExprEnd, kExprBlock, kExprEnd};
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, Block0_end_end) {
+ static const byte code[] = {kExprBlock, kExprEnd, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, Block1) {
- static const byte code[] = {kExprBlock, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ byte code[] = {B1(WASM_SET_LOCAL(0, WASM_ZERO))};
EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
}
-TEST_F(AstDecoderTest, Block0_fallthru2) {
- static const byte code[] = {kExprBlock, 0, kExprSetLocal, 0, kExprI8Const, 0};
+TEST_F(AstDecoderTest, Block1_i) {
+ byte code[] = {B1(WASM_ZERO)};
EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
+ EXPECT_FAILURE(sigs.l_ll(), code);
+}
+
+TEST_F(AstDecoderTest, Block1_f) {
+ byte code[] = {B1(WASM_F32(0))};
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_VERIFIES(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
+ EXPECT_FAILURE(sigs.l_ll(), code);
+}
+
+TEST_F(AstDecoderTest, Block1_continue) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR(0)));
+ EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(1)));
+ EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(2)));
+ EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(3)));
+}
+
+TEST_F(AstDecoderTest, Block1_br) {
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprBlock, kExprBr, ARITY_1, DEPTH_0,
+ kExprEnd);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprBlock, kExprBr, ARITY_0, DEPTH_0,
+ kExprEnd);
+}
+
+TEST_F(AstDecoderTest, Block2_br) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_NOP, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_NOP));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_BR(0)));
}
TEST_F(AstDecoderTest, Block2) {
- static const byte code[] = {kExprBlock, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprSetLocal, 0, kExprI8Const, 0}; // --
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ B2(WASM_NOP, WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)));
+}
+
+TEST_F(AstDecoderTest, Block2b) {
+ byte code[] = {B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_ZERO)};
EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
}
TEST_F(AstDecoderTest, Block2_fallthru) {
- static const byte code[] = {kExprBlock, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprI8Const, 11}; // --
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_I8(23));
}
-TEST_F(AstDecoderTest, BlockN) {
- byte block[] = {kExprBlock, 2};
+TEST_F(AstDecoderTest, Block3) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(), B3(WASM_SET_LOCAL(0, WASM_ZERO), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_I8(11)));
+}
- for (size_t i = 0; i < 10; i++) {
- size_t total = sizeof(block) + sizeof(kCodeSetLocal0) * i;
- byte* code = reinterpret_cast<byte*>(malloc(total));
- memcpy(code, block, sizeof(block));
- code[1] = static_cast<byte>(i);
- for (size_t j = 0; j < i; j++) {
- memcpy(code + sizeof(block) + j * sizeof(kCodeSetLocal0), kCodeSetLocal0,
- sizeof(kCodeSetLocal0));
- }
- Verify(kSuccess, sigs.v_i(), code, code + total);
- free(code);
- }
+TEST_F(AstDecoderTest, Block5) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), B1(WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), B2(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), B3(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_BLOCK(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_BLOCK(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+}
+
+TEST_F(AstDecoderTest, BlockF32) {
+ static const byte code[] = {kExprBlock, kExprF32Const, 0, 0, 0, 0, kExprEnd};
+ EXPECT_VERIFIES(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
}
TEST_F(AstDecoderTest, BlockN_off_end) {
- for (byte i = 2; i < 10; i++) {
- byte code[] = {kExprBlock, i, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
+ byte code[] = {kExprBlock, kExprNop, kExprNop, kExprNop, kExprNop, kExprEnd};
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ for (size_t i = 1; i < arraysize(code); i++) {
+ Verify(kError, sigs.v_v(), code, code + i);
}
}
-TEST_F(AstDecoderTest, Block1_break) {
- static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, Block2_continue) {
+ static const byte code[] = {kExprBlock, kExprBr, ARITY_0,
+ DEPTH_1, kExprNop, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
}
-TEST_F(AstDecoderTest, Block2_break) {
- static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, NestedBlock_return) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), B1(B1(WASM_RETURN1(WASM_ZERO))));
}
-TEST_F(AstDecoderTest, Block1_continue) {
- static const byte code[] = {kExprBlock, 1, kExprBr, 1, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
+TEST_F(AstDecoderTest, BlockBinop) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(B1(WASM_I8(1)), WASM_I8(2)));
}
-TEST_F(AstDecoderTest, Block2_continue) {
- static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 1, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
+TEST_F(AstDecoderTest, BlockBrBinop) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_I32_AND(B1(WASM_BRV(0, WASM_I8(1))), WASM_I8(2)));
}
-TEST_F(AstDecoderTest, ExprBlock0) {
- static const byte code[] = {kExprBlock, 0};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, If_empty1) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprEnd);
}
-TEST_F(AstDecoderTest, ExprBlock1a) {
- static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+TEST_F(AstDecoderTest, If_empty2) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprElse, kExprEnd);
}
-TEST_F(AstDecoderTest, ExprBlock1b) {
- static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
- EXPECT_FAILURE(sigs.f_ff(), code);
+TEST_F(AstDecoderTest, If_empty3) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, WASM_ZERO, kExprElse,
+ kExprEnd);
}
-TEST_F(AstDecoderTest, ExprBlock1c) {
- static const byte code[] = {kExprBlock, 1, kExprF32Const, 0, 0, 0, 0};
- EXPECT_VERIFIES(sigs.f_ff(), code);
+TEST_F(AstDecoderTest, If_empty4) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprElse, WASM_ZERO,
+ kExprEnd);
}
-TEST_F(AstDecoderTest, IfEmpty) {
- static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_empty_stack) {
+ byte code[] = {kExprIf};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IfSet) {
- static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprSetLocal,
- 0, kExprI8Const, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_incomplete1) {
+ byte code[] = {kExprI8Const, 0, kExprIf};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IfBlock1) {
- static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprBlock,
- 1, kExprSetLocal, 0, kExprI8Const,
- 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_incomplete2) {
+ byte code[] = {kExprI8Const, 0, kExprIf, kExprNop};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IfBlock2) {
- static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprBlock,
- 2, kExprSetLocal, 0, kExprI8Const,
- 0, kExprSetLocal, 0, kExprI8Const,
- 0};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_else_else) {
+ byte code[] = {kExprI8Const, 0, kExprIf, kExprElse, kExprElse, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfEmpty) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprGetLocal, 0, kExprIf, kExprEnd);
+}
+
+TEST_F(AstDecoderTest, IfSet) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
}
TEST_F(AstDecoderTest, IfElseEmpty) {
- static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprNop,
- kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_GET_LOCAL(0), kExprIf, kExprElse,
+ kExprEnd);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
}
-TEST_F(AstDecoderTest, IfElseSet) {
- static const byte code[] = {kExprIfElse,
- kExprGetLocal,
- 0, // --
- kExprSetLocal,
- 0,
- kExprI8Const,
- 0, // --
- kExprSetLocal,
- 0,
- kExprI8Const,
- 1}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, IfElseUnreachable1) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
}
-TEST_F(AstDecoderTest, IfElseUnreachable) {
- static const byte code[] = {kExprIfElse, kExprI8Const, 0,
- kExprUnreachable, kExprGetLocal, 0};
+TEST_F(AstDecoderTest, IfElseUnreachable2) {
+ static const byte code[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType types[] = {kAstI32, kLocalTypes[i]};
@@ -509,66 +598,136 @@ TEST_F(AstDecoderTest, IfElseUnreachable) {
}
}
+TEST_F(AstDecoderTest, IfBreak) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0)));
+ EXPECT_FAILURE_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1)));
+}
+
+TEST_F(AstDecoderTest, IfElseBreak) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(0)));
+ EXPECT_FAILURE_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(1)));
+}
+
+TEST_F(AstDecoderTest, Block_else) {
+ byte code[] = {kExprI8Const, 0, kExprBlock, kExprElse, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfNop) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfNopElseNop) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, If_end_end) {
+ static const byte code[] = {kExprGetLocal, 0, kExprIf, kExprEnd, kExprEnd};
+ EXPECT_FAILURE(sigs.v_i(), code);
+}
+
+TEST_F(AstDecoderTest, If_falloff) {
+ static const byte code[] = {kExprGetLocal, 0, kExprIf};
+ EXPECT_FAILURE(sigs.v_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfElse_falloff) {
+ static const byte code[] = {kExprGetLocal, 0, kExprIf, kExprNop, kExprElse};
+ EXPECT_FAILURE(sigs.v_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfElseNop) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfBlock1) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ B1(WASM_SET_LOCAL(0, WASM_ZERO)), WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfBlock1b) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, IfBlock2a) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, IfBlock2b) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfElseSet) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_I8(1))));
+}
+
TEST_F(AstDecoderTest, Loop0) {
- static const byte code[] = {kExprLoop, 0};
+ static const byte code[] = {kExprLoop, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, Loop1) {
- static const byte code[] = {kExprLoop, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ static const byte code[] = {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO))};
EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
}
TEST_F(AstDecoderTest, Loop2) {
- static const byte code[] = {kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprSetLocal, 0, kExprI8Const, 0}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)));
}
TEST_F(AstDecoderTest, Loop1_continue) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0)));
}
TEST_F(AstDecoderTest, Loop1_break) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 1, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(1)));
}
TEST_F(AstDecoderTest, Loop2_continue) {
- static const byte code[] = {kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprBr, 0, kExprNop}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(0)));
}
TEST_F(AstDecoderTest, Loop2_break) {
- static const byte code[] = {kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprBr, 1, kExprNop}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(1)));
}
TEST_F(AstDecoderTest, ExprLoop0) {
- static const byte code[] = {kExprLoop, 0};
+ static const byte code[] = {kExprLoop, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, ExprLoop1a) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BRV(0, WASM_ZERO)));
}
TEST_F(AstDecoderTest, ExprLoop1b) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
+ EXPECT_FAILURE_INLINE(sigs.f_ff(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
}
TEST_F(AstDecoderTest, ExprLoop2_unreachable) {
- static const byte code[] = {kExprLoop, 2, kExprBr, 0,
- kExprI8Const, 0, kExprNop};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BR(0), WASM_NOP));
}
TEST_F(AstDecoderTest, ReturnVoid1) {
@@ -579,7 +738,7 @@ TEST_F(AstDecoderTest, ReturnVoid1) {
}
TEST_F(AstDecoderTest, ReturnVoid2) {
- static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
+ static const byte code[] = {kExprBlock, kExprBr, ARITY_0, DEPTH_0, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
EXPECT_FAILURE(sigs.i_f(), code);
@@ -598,67 +757,85 @@ TEST_F(AstDecoderTest, ReturnVoid3) {
TEST_F(AstDecoderTest, Unreachable1) {
EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable);
EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable, kExprUnreachable);
- EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_BLOCK(2, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(2, WASM_BR(0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(2, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(2, WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0), WASM_ZERO));
}
-TEST_F(AstDecoderTest, Codeiness) {
- VERIFY(kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprBr, 0, kExprNop); // --
+TEST_F(AstDecoderTest, Unreachable_binop) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(WASM_ZERO, WASM_UNREACHABLE));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(WASM_UNREACHABLE, WASM_ZERO));
}
-TEST_F(AstDecoderTest, ExprIf1) {
- VERIFY(kExprIf, kExprGetLocal, 0, kExprI8Const, 0, kExprI8Const, 1);
- VERIFY(kExprIf, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0);
- VERIFY(kExprIf, kExprGetLocal, 0, kExprI32Add, kExprGetLocal, 0,
- kExprGetLocal, 0, kExprI8Const, 1);
+TEST_F(AstDecoderTest, Unreachable_select) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_SELECT(WASM_ZERO, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_SELECT(WASM_ZERO, WASM_ZERO, WASM_UNREACHABLE));
}
-TEST_F(AstDecoderTest, ExprIf_off_end) {
- static const byte kCode[] = {kExprIf, kExprGetLocal, 0, kExprGetLocal,
- 0, kExprGetLocal, 0};
- for (size_t len = 1; len < arraysize(kCode); len++) {
+TEST_F(AstDecoderTest, If1) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(8)));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9),
+ WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_I8(8)));
+}
+
+TEST_F(AstDecoderTest, If_off_end) {
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0))};
+ for (size_t len = 3; len < arraysize(kCode); len++) {
Verify(kError, sigs.i_i(), kCode, kCode + len);
}
}
-TEST_F(AstDecoderTest, ExprIf_type) {
- {
- // float|double ? 1 : 2
- static const byte kCode[] = {kExprIfElse, kExprGetLocal, 0, kExprI8Const,
- 1, kExprI8Const, 2};
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
- }
- {
- // 1 ? float|double : 2
- static const byte kCode[] = {kExprIfElse, kExprI8Const, 1, kExprGetLocal,
- 0, kExprI8Const, 2};
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
- }
- {
- // stmt ? 0 : 1
- static const byte kCode[] = {kExprIfElse, kExprNop, kExprI8Const,
- 0, kExprI8Const, 1};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- }
- {
- // 0 ? stmt : 1
- static const byte kCode[] = {kExprIfElse, kExprI8Const, 0,
- kExprNop, kExprI8Const, 1};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- }
- {
- // 0 ? 1 : stmt
- static const byte kCode[] = {kExprIfElse, kExprI8Const, 0, kExprI8Const, 1,
- 0, kExprBlock};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- }
+TEST_F(AstDecoderTest, If_type1) {
+ // float|double ? 1 : 2
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(0), WASM_I8(2))};
+ EXPECT_VERIFIES(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type2) {
+ // 1 ? float|double : 2
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_I8(1), WASM_GET_LOCAL(0), WASM_I8(1))};
+ EXPECT_VERIFIES(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type3) {
+ // stmt ? 0 : 1
+ static const byte kCode[] = {WASM_IF_ELSE(WASM_NOP, WASM_I8(0), WASM_I8(1))};
+ EXPECT_FAILURE(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type4) {
+ // 0 ? stmt : 1
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_I8(1))};
+ EXPECT_FAILURE(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type5) {
+ // 0 ? 1 : stmt
+ static const byte kCode[] = {WASM_IF_ELSE(WASM_ZERO, WASM_I8(1), WASM_NOP)};
+ EXPECT_FAILURE(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
}
TEST_F(AstDecoderTest, Int64Local_param) {
@@ -669,8 +846,7 @@ TEST_F(AstDecoderTest, Int64Locals) {
for (byte i = 1; i < 8; i++) {
AddLocals(kAstI64, 1);
for (byte j = 0; j < i; j++) {
- byte code[] = {kExprGetLocal, j};
- EXPECT_VERIFIES(sigs.l_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.l_v(), WASM_GET_LOCAL(j));
}
}
}
@@ -737,53 +913,50 @@ TEST_F(AstDecoderTest, MacrosStmt) {
VERIFY(WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
VERIFY(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
VERIFY(WASM_NOP);
- VERIFY(WASM_BLOCK(1, WASM_NOP));
- VERIFY(WASM_LOOP(1, WASM_NOP));
- VERIFY(WASM_LOOP(1, WASM_BREAK(0)));
- VERIFY(WASM_LOOP(1, WASM_CONTINUE(0)));
+ VERIFY(B1(WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_BREAK(0)));
+ VERIFY(WASM_LOOP(WASM_CONTINUE(0)));
}
TEST_F(AstDecoderTest, MacrosBreak) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BREAK(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BREAK(0)));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(1, WASM_BREAKV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.l_l(),
- WASM_LOOP(1, WASM_BREAKV(0, WASM_I64V_1(0))));
- EXPECT_VERIFIES_INLINE(sigs.f_ff(),
- WASM_LOOP(1, WASM_BREAKV(0, WASM_F32(0.0))));
- EXPECT_VERIFIES_INLINE(sigs.d_dd(),
- WASM_LOOP(1, WASM_BREAKV(0, WASM_F64(0.0))));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BREAKV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.l_l(), WASM_LOOP(WASM_BREAKV(0, WASM_I64V_1(0))));
+ EXPECT_VERIFIES_INLINE(sigs.f_ff(), WASM_LOOP(WASM_BREAKV(0, WASM_F32(0.0))));
+ EXPECT_VERIFIES_INLINE(sigs.d_dd(), WASM_LOOP(WASM_BREAKV(0, WASM_F64(0.0))));
}
TEST_F(AstDecoderTest, MacrosContinue) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_CONTINUE(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_CONTINUE(0)));
}
TEST_F(AstDecoderTest, MacrosVariadic) {
- VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_NOP));
- VERIFY(WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
- VERIFY(WASM_LOOP(2, WASM_NOP, WASM_NOP));
- VERIFY(WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
+ VERIFY(B2(WASM_NOP, WASM_NOP));
+ VERIFY(B3(WASM_NOP, WASM_NOP, WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_NOP, WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
}
TEST_F(AstDecoderTest, MacrosNestedBlocks) {
- VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_BLOCK(2, WASM_NOP, WASM_NOP)));
- VERIFY(WASM_BLOCK(3, WASM_NOP, // --
- WASM_BLOCK(2, WASM_NOP, WASM_NOP), // --
- WASM_BLOCK(2, WASM_NOP, WASM_NOP))); // --
- VERIFY(WASM_BLOCK(1, WASM_BLOCK(1, WASM_BLOCK(2, WASM_NOP, WASM_NOP))));
+ VERIFY(B2(WASM_NOP, B2(WASM_NOP, WASM_NOP)));
+ VERIFY(B3(WASM_NOP, // --
+ B2(WASM_NOP, WASM_NOP), // --
+ B2(WASM_NOP, WASM_NOP))); // --
+ VERIFY(B1(B1(B2(WASM_NOP, WASM_NOP))));
}
TEST_F(AstDecoderTest, MultipleReturn) {
static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
- EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_RETURN(WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
+ EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_RETURNN(1, WASM_ZERO));
FunctionSig sig_iii_v(3, 0, kIntTypes5);
EXPECT_VERIFIES_INLINE(&sig_iii_v,
- WASM_RETURN(WASM_ZERO, WASM_ONE, WASM_I8(44)));
- EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+ WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I8(44)));
+ EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
}
TEST_F(AstDecoderTest, MultipleReturn_fallthru) {
@@ -887,78 +1060,57 @@ TEST_F(AstDecoderTest, MemorySize) {
}
TEST_F(AstDecoderTest, GrowMemory) {
- byte code[] = {kExprGrowMemory, kExprGetLocal, 0};
+ byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
EXPECT_VERIFIES(sigs.i_i(), code);
EXPECT_FAILURE(sigs.i_d(), code);
}
TEST_F(AstDecoderTest, LoadMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
- byte code[] = {kExprI32LoadMem, ZERO_ALIGNMENT, static_cast<byte>(offset),
- kExprI8Const, 0};
+ byte code[] = {kExprI8Const, 0, kExprI32LoadMem, ZERO_ALIGNMENT,
+ static_cast<byte>(offset)};
EXPECT_VERIFIES(sigs.i_i(), code);
}
}
TEST_F(AstDecoderTest, StoreMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
- byte code[] = {
- kExprI32StoreMem, 0, static_cast<byte>(offset), kExprI8Const, 0,
- kExprI8Const, 0};
+ byte code[] = {WASM_STORE_MEM_OFFSET(MachineType::Int32(), offset,
+ WASM_ZERO, WASM_ZERO)};
EXPECT_VERIFIES(sigs.i_i(), code);
}
}
-TEST_F(AstDecoderTest, LoadMemOffset_varint) {
- byte code1[] = {kExprI32LoadMem, ZERO_ALIGNMENT, ZERO_OFFSET, kExprI8Const,
- 0};
- byte code2[] = {kExprI32LoadMem, ZERO_ALIGNMENT, 0x80, 1, kExprI8Const, 0};
- byte code3[] = {
- kExprI32LoadMem, ZERO_ALIGNMENT, 0x81, 0x82, 5, kExprI8Const, 0};
- byte code4[] = {
- kExprI32LoadMem, ZERO_ALIGNMENT, 0x83, 0x84, 0x85, 7, kExprI8Const, 0};
+#define BYTE0(x) ((x)&0x7F)
+#define BYTE1(x) ((x >> 7) & 0x7F)
+#define BYTE2(x) ((x >> 14) & 0x7F)
+#define BYTE3(x) ((x >> 21) & 0x7F)
- EXPECT_VERIFIES(sigs.i_i(), code1);
- EXPECT_VERIFIES(sigs.i_i(), code2);
- EXPECT_VERIFIES(sigs.i_i(), code3);
- EXPECT_VERIFIES(sigs.i_i(), code4);
+#define VARINT1(x) BYTE0(x)
+#define VARINT2(x) BYTE0(x) | 0x80, BYTE1(x)
+#define VARINT3(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x)
+#define VARINT4(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x) | 0x80, BYTE3(x)
+
+TEST_F(AstDecoderTest, LoadMemOffset_varint) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT1(0x45));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT2(0x3999));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT3(0x344445));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT4(0x36666667));
}
TEST_F(AstDecoderTest, StoreMemOffset_varint) {
- byte code1[] = {
- kExprI32StoreMem, ZERO_ALIGNMENT, 0, kExprI8Const, 0, kExprI8Const, 0};
- byte code2[] = {kExprI32StoreMem,
- ZERO_ALIGNMENT,
- 0x80,
- 1,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
- byte code3[] = {kExprI32StoreMem,
- ZERO_ALIGNMENT,
- 0x81,
- 0x82,
- 5,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
- byte code4[] = {kExprI32StoreMem,
- ZERO_ALIGNMENT,
- 0x83,
- 0x84,
- 0x85,
- 7,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
-
- EXPECT_VERIFIES(sigs.i_i(), code1);
- EXPECT_VERIFIES(sigs.i_i(), code2);
- EXPECT_VERIFIES(sigs.i_i(), code3);
- EXPECT_VERIFIES(sigs.i_i(), code4);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT1(0x33));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT2(0x1111));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT3(0x222222));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT4(0x44444444));
}
TEST_F(AstDecoderTest, AllLoadMemCombinations) {
@@ -966,9 +1118,7 @@ TEST_F(AstDecoderTest, AllLoadMemCombinations) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
- byte code[] = {
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, false)),
- ZERO_ALIGNMENT, ZERO_OFFSET, kExprI8Const, 0};
+ byte code[] = {WASM_LOAD_MEM(mem_type, WASM_ZERO)};
FunctionSig sig(1, 0, &local_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
EXPECT_VERIFIES(&sig, code);
@@ -984,14 +1134,7 @@ TEST_F(AstDecoderTest, AllStoreMemCombinations) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
- byte code[] = {
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, true)),
- ZERO_ALIGNMENT,
- ZERO_OFFSET,
- kExprI8Const,
- 0,
- kExprGetLocal,
- 0};
+ byte code[] = {WASM_STORE_MEM(mem_type, WASM_ZERO, WASM_GET_LOCAL(0))};
FunctionSig sig(0, 1, &local_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
EXPECT_VERIFIES(&sig, code);
@@ -1002,7 +1145,6 @@ TEST_F(AstDecoderTest, AllStoreMemCombinations) {
}
}
-
namespace {
// A helper for tests that require a module environment for functions and
// globals.
@@ -1011,10 +1153,9 @@ class TestModuleEnv : public ModuleEnv {
TestModuleEnv() {
instance = nullptr;
module = &mod;
- linker = nullptr;
}
- byte AddGlobal(MachineType mem_type) {
- mod.globals.push_back({0, 0, mem_type, 0, false});
+ byte AddGlobal(LocalType type) {
+ mod.globals.push_back({0, 0, type, 0, false});
CHECK(mod.globals.size() <= 127);
return static_cast<byte>(mod.globals.size() - 1);
}
@@ -1024,12 +1165,23 @@ class TestModuleEnv : public ModuleEnv {
return static_cast<byte>(mod.signatures.size() - 1);
}
byte AddFunction(FunctionSig* sig) {
- mod.functions.push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
+ mod.functions.push_back({sig, // sig
+ 0, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0}); // code_end_offset
CHECK(mod.functions.size() <= 127);
return static_cast<byte>(mod.functions.size() - 1);
}
byte AddImport(FunctionSig* sig) {
- mod.import_table.push_back({sig, 0, 0});
+ mod.import_table.push_back({sig, // sig
+ 0, // sig_index
+ 0, // module_name_offset
+ 0, // module_name_length
+ 0, // function_name_offset
+ 0}); // function_name_length
CHECK(mod.import_table.size() <= 127);
return static_cast<byte>(mod.import_table.size() - 1);
}
@@ -1048,9 +1200,9 @@ TEST_F(AstDecoderTest, SimpleCalls) {
module_env.AddFunction(sigs.i_i());
module_env.AddFunction(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I8(27)));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION0(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION2(2, WASM_I8(37), WASM_I8(77)));
}
TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
@@ -1063,35 +1215,8 @@ TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
module_env.AddFunction(sigs.f_ff());
EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION0(0));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_ZERO));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
-}
-
-TEST_F(AstDecoderTest, CallsWithSpilloverArgs) {
- static LocalType a_i_ff[] = {kAstI32, kAstF32, kAstF32};
- FunctionSig sig_i_ff(1, 2, a_i_ff);
-
- TestModuleEnv module_env;
- module = &module_env;
-
- module_env.AddFunction(&sig_i_ff);
-
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
-
- EXPECT_VERIFIES_INLINE(sigs.i_ff(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
-
- EXPECT_FAILURE_INLINE(sigs.f_ff(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
-
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(0.2)));
-
- EXPECT_VERIFIES_INLINE(
- sigs.f_ff(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(11)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_ZERO));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(2, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
@@ -1101,9 +1226,9 @@ TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
module_env.AddFunction(sigs.i_i());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I64V_1(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F32(17.1)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F64(17.1)));
}
TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
@@ -1113,15 +1238,15 @@ TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F64(37.2)));
module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, SimpleIndirectCalls) {
@@ -1134,9 +1259,9 @@ TEST_F(AstDecoderTest, SimpleIndirectCalls) {
byte f2 = module_env.AddSignature(sigs.i_ii());
EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(22)));
EXPECT_VERIFIES_INLINE(
- sig, WASM_CALL_INDIRECT(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
+ sig, WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
}
TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
@@ -1148,11 +1273,11 @@ TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
module_env.AddSignature(sigs.i_v());
EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(22)));
module_env.AddSignature(sigs.i_i());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(2, WASM_ZERO, WASM_I8(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(2, WASM_ZERO, WASM_I8(27)));
}
TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
@@ -1162,10 +1287,11 @@ TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
byte f0 = module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig,
+ WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I64V_1(27)));
EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2)));
+ WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_F64(37.2)));
EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27)));
@@ -1173,10 +1299,11 @@ TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
byte f1 = module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig,
+ WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I64V_1(16)));
EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
+ WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, SimpleImportCalls) {
@@ -1189,8 +1316,8 @@ TEST_F(AstDecoderTest, SimpleImportCalls) {
byte f2 = module_env.AddImport(sigs.i_ii());
EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT0(f0));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I8(22)));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT(f2, WASM_I8(32), WASM_I8(72)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT2(f2, WASM_I8(32), WASM_I8(72)));
}
TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
@@ -1201,16 +1328,16 @@ TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
byte f0 = module_env.AddImport(sigs.i_f());
EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f0));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_F64(37.2)));
byte f1 = module_env.AddImport(sigs.i_d());
EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f1));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, Int32Globals) {
@@ -1218,26 +1345,10 @@ TEST_F(AstDecoderTest, Int32Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Int8());
- module_env.AddGlobal(MachineType::Uint8());
- module_env.AddGlobal(MachineType::Int16());
- module_env.AddGlobal(MachineType::Uint16());
- module_env.AddGlobal(MachineType::Int32());
- module_env.AddGlobal(MachineType::Uint32());
+ module_env.AddGlobal(kAstI32);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(2));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(3));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(4));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(5));
-
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(4, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(5, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Int32Globals_fail) {
@@ -1245,20 +1356,20 @@ TEST_F(AstDecoderTest, Int32Globals_fail) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Int64());
- module_env.AddGlobal(MachineType::Uint64());
- module_env.AddGlobal(MachineType::Float32());
- module_env.AddGlobal(MachineType::Float64());
+ module_env.AddGlobal(kAstI64);
+ module_env.AddGlobal(kAstI64);
+ module_env.AddGlobal(kAstF32);
+ module_env.AddGlobal(kAstF64);
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(1));
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(2));
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(3));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(1));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(2));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(3));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(2, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(3, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Int64Globals) {
@@ -1266,14 +1377,14 @@ TEST_F(AstDecoderTest, Int64Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Int64());
- module_env.AddGlobal(MachineType::Uint64());
+ module_env.AddGlobal(kAstI64);
+ module_env.AddGlobal(kAstI64);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(1));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Float32Globals) {
@@ -1281,10 +1392,10 @@ TEST_F(AstDecoderTest, Float32Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Float32());
+ module_env.AddGlobal(kAstF32);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Float64Globals) {
@@ -1292,54 +1403,70 @@ TEST_F(AstDecoderTest, Float64Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Float64());
+ module_env.AddGlobal(kAstF64);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
-TEST_F(AstDecoderTest, AllLoadGlobalCombinations) {
+TEST_F(AstDecoderTest, AllGetGlobalCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
- for (size_t j = 0; j < arraysize(machineTypes); j++) {
- MachineType mem_type = machineTypes[j];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType global_type = kLocalTypes[j];
FunctionSig sig(1, 0, &local_type);
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(mem_type);
- if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES_INLINE(&sig, WASM_LOAD_GLOBAL(0));
+ module_env.AddGlobal(global_type);
+ if (local_type == global_type) {
+ EXPECT_VERIFIES_INLINE(&sig, WASM_GET_GLOBAL(0));
} else {
- EXPECT_FAILURE_INLINE(&sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(&sig, WASM_GET_GLOBAL(0));
}
}
}
}
-TEST_F(AstDecoderTest, AllStoreGlobalCombinations) {
+TEST_F(AstDecoderTest, AllSetGlobalCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
- for (size_t j = 0; j < arraysize(machineTypes); j++) {
- MachineType mem_type = machineTypes[j];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType global_type = kLocalTypes[j];
FunctionSig sig(0, 1, &local_type);
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(mem_type);
- if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES_INLINE(&sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ module_env.AddGlobal(global_type);
+ if (local_type == global_type) {
+ EXPECT_VERIFIES_INLINE(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
} else {
- EXPECT_FAILURE_INLINE(&sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
}
}
}
+TEST_F(AstDecoderTest, BreakEnd) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ B1(WASM_I32_ADD(WASM_BRV(0, WASM_ZERO), WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ B1(WASM_I32_ADD(WASM_ZERO, WASM_BRV(0, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, BreakIfBinop) {
+ EXPECT_FAILURE_INLINE(
+ sigs.i_i(), WASM_BLOCK(WASM_I32_ADD(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO),
+ WASM_ZERO)));
+ EXPECT_FAILURE_INLINE(sigs.i_i(),
+ WASM_BLOCK(WASM_I32_ADD(
+ WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
+}
+
TEST_F(AstDecoderTest, BreakNesting1) {
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
byte code[] = {WASM_BLOCK(
- 2, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i, WASM_ZERO)),
- WASM_SET_LOCAL(0, WASM_I8(1))),
+ WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i + 1, WASM_ZERO)),
+ WASM_SET_LOCAL(0, WASM_I8(1))),
WASM_GET_LOCAL(0))};
if (i < 3) {
EXPECT_VERIFIES(sigs.i_i(), code);
@@ -1353,8 +1480,8 @@ TEST_F(AstDecoderTest, BreakNesting2) {
AddLocals(kAstI32, 1);
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if 0 break[N]) (set p 1)) (return p)) (11)
- byte code[] = {WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BREAK(i)),
- WASM_SET_LOCAL(0, WASM_I8(1)))),
+ byte code[] = {B1(WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BREAK(i + 1)),
+ WASM_SET_LOCAL(0, WASM_I8(1)))),
WASM_I8(11)};
if (i < 2) {
EXPECT_VERIFIES(sigs.v_v(), code);
@@ -1367,8 +1494,8 @@ TEST_F(AstDecoderTest, BreakNesting2) {
TEST_F(AstDecoderTest, BreakNesting3) {
for (int i = 0; i < 5; i++) {
// (block[1] (loop[1] (block[1] (if 0 break[N])
- byte code[] = {WASM_BLOCK(
- 1, WASM_LOOP(1, WASM_BLOCK(1, WASM_IF(WASM_ZERO, WASM_BREAK(i)))))};
+ byte code[] = {
+ WASM_BLOCK(WASM_LOOP(B1(WASM_IF(WASM_ZERO, WASM_BREAK(i + 1)))))};
if (i < 3) {
EXPECT_VERIFIES(sigs.v_v(), code);
} else {
@@ -1378,41 +1505,42 @@ TEST_F(AstDecoderTest, BreakNesting3) {
}
TEST_F(AstDecoderTest, BreaksWithMultipleTypes) {
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
-
- EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(8)),
- WASM_BRV_IF_ZERO(0, WASM_I8(0)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(9)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
- WASM_BRV_IF_ZERO(0, WASM_I8(11))));
+ B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
+
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B3(WASM_BRV_IF_ZERO(0, WASM_I8(8)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(0)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B3(WASM_BRV_IF_ZERO(0, WASM_I8(9)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(11))));
}
TEST_F(AstDecoderTest, BreakNesting_6_levels) {
for (int mask = 0; mask < 64; mask++) {
for (int i = 0; i < 14; i++) {
byte code[] = {
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBr, static_cast<byte>(i),
- kExprNop // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBr, ARITY_0, static_cast<byte>(i), // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd // --
};
int depth = 6;
for (int l = 0; l < 6; l++) {
if (mask & (1 << l)) {
- code[l * 2] = kExprLoop;
+ code[l] = kExprLoop;
depth++;
}
}
@@ -1432,29 +1560,26 @@ TEST_F(AstDecoderTest, ExprBreak_TypeCheck) {
FunctionSig* sig = sigarray[i];
// unify X and X => OK
EXPECT_VERIFIES_INLINE(
- sig, WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(0)));
+ sig, B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0)));
}
// unify i32 and f32 => fail
EXPECT_FAILURE_INLINE(
sigs.i_i(),
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
+ B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
// unify f64 and f64 => OK
EXPECT_VERIFIES_INLINE(
sigs.d_dd(),
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_F64(1.2)));
+ B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))), WASM_F64(1.2)));
}
TEST_F(AstDecoderTest, ExprBreak_TypeCheckAll) {
- byte code1[] = {WASM_BLOCK(2,
- WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ byte code1[] = {WASM_BLOCK(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1))};
- byte code2[] = {
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(1))};
+ byte code2[] = {B2(WASM_IF(WASM_ZERO, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
@@ -1479,14 +1604,12 @@ TEST_F(AstDecoderTest, ExprBr_Unify) {
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- byte code1[] = {
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(which))),
- WASM_GET_LOCAL(which ^ 1))};
+ byte code1[] = {B2(WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+ WASM_GET_LOCAL(which ^ 1))};
byte code2[] = {
- WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+ WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BRV(2, WASM_GET_LOCAL(which))),
WASM_GET_LOCAL(which ^ 1))};
-
if (type == kAstI32) {
EXPECT_VERIFIES(&sig, code1);
EXPECT_VERIFIES(&sig, code2);
@@ -1499,8 +1622,7 @@ TEST_F(AstDecoderTest, ExprBr_Unify) {
}
TEST_F(AstDecoderTest, ExprBrIf_cond_type) {
- byte code[] = {
- WASM_BLOCK(1, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
+ byte code[] = {B1(WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
@@ -1516,9 +1638,8 @@ TEST_F(AstDecoderTest, ExprBrIf_cond_type) {
}
TEST_F(AstDecoderTest, ExprBrIf_val_type) {
- byte code[] = {
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- WASM_GET_LOCAL(0))};
+ byte code[] = {B2(WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ WASM_GET_LOCAL(0))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j],
@@ -1541,9 +1662,9 @@ TEST_F(AstDecoderTest, ExprBrIf_Unify) {
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- byte code1[] = {WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
- WASM_GET_LOCAL(which ^ 1))};
- byte code2[] = {WASM_LOOP(2, WASM_BRV_IF_ZERO(1, WASM_GET_LOCAL(which)),
+ byte code1[] = {B2(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+ byte code2[] = {WASM_LOOP(WASM_BRV_IF_ZERO(1, WASM_GET_LOCAL(which)),
WASM_GET_LOCAL(which ^ 1))};
if (type == kAstI32) {
@@ -1558,31 +1679,29 @@ TEST_F(AstDecoderTest, ExprBrIf_Unify) {
}
TEST_F(AstDecoderTest, BrTable0) {
- static byte code[] = {kExprBrTable, 0, 0};
+ static byte code[] = {kExprNop, kExprBrTable, 0, 0};
EXPECT_FAILURE(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable0b) {
- static byte code[] = {kExprBrTable, 0, 0, kExprI32Const, 11};
+ static byte code[] = {kExprNop, kExprI32Const, 11, kExprBrTable, 0, 0};
EXPECT_FAILURE(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
}
TEST_F(AstDecoderTest, BrTable0c) {
- static byte code[] = {kExprBrTable, 0, 1, 0, 0, kExprI32Const, 11};
+ static byte code[] = {kExprNop, kExprI32Const, 11, kExprBrTable, 0, 1, 0, 0};
EXPECT_FAILURE(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
}
TEST_F(AstDecoderTest, BrTable1a) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_I8(67), 0, BR_TARGET(0)))};
+ static byte code[] = {B1(WASM_BR_TABLE(WASM_I8(67), 0, BR_TARGET(0)))};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable1b) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
+ static byte code[] = {B1(WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
EXPECT_VERIFIES(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
EXPECT_FAILURE(sigs.f_ff(), code);
@@ -1591,20 +1710,18 @@ TEST_F(AstDecoderTest, BrTable1b) {
TEST_F(AstDecoderTest, BrTable2a) {
static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(0)))};
+ B1(WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(0)))};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable2b) {
static byte code[] = {WASM_BLOCK(
- 1, WASM_BLOCK(
- 1, WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(1))))};
+ WASM_BLOCK(WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(1))))};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable_off_end) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
+ static byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
for (size_t len = 1; len < sizeof(code); len++) {
Verify(kError, sigs.i_i(), code, code + len);
}
@@ -1612,8 +1729,7 @@ TEST_F(AstDecoderTest, BrTable_off_end) {
TEST_F(AstDecoderTest, BrTable_invalid_br1) {
for (int depth = 0; depth < 4; depth++) {
- byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
+ byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
if (depth == 0) {
EXPECT_VERIFIES(sigs.v_i(), code);
} else {
@@ -1625,7 +1741,7 @@ TEST_F(AstDecoderTest, BrTable_invalid_br1) {
TEST_F(AstDecoderTest, BrTable_invalid_br2) {
for (int depth = 0; depth < 4; depth++) {
byte code[] = {
- WASM_LOOP(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
+ WASM_LOOP(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
if (depth <= 1) {
EXPECT_VERIFIES(sigs.v_i(), code);
} else {
@@ -1635,20 +1751,19 @@ TEST_F(AstDecoderTest, BrTable_invalid_br2) {
}
TEST_F(AstDecoderTest, ExprBreakNesting1) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_BLOCK(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BR_IF(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0)));
EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_LOOP(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+ WASM_LOOP(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BRV(1, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR(1)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(1)));
}
TEST_F(AstDecoderTest, Select) {
@@ -1714,6 +1829,89 @@ TEST_F(AstDecoderTest, Select_TypeCheck) {
WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64V_1(0)));
}
+TEST_F(AstDecoderTest, Throw) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_GET_LOCAL(0), kExprThrow);
+
+ EXPECT_FAILURE_INLINE(sigs.i_d(), WASM_GET_LOCAL(0), kExprThrow,
+ WASM_I32V(0));
+ EXPECT_FAILURE_INLINE(sigs.i_f(), WASM_GET_LOCAL(0), kExprThrow,
+ WASM_I32V(0));
+ EXPECT_FAILURE_INLINE(sigs.l_l(), WASM_GET_LOCAL(0), kExprThrow,
+ WASM_I64V(0));
+}
+
+#define WASM_CATCH(local) kExprCatch, static_cast<byte>(local)
+TEST_F(AstDecoderTest, TryCatch) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprEnd);
+
+ // Missing catch.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryCatch, kExprEnd);
+
+ // Missing end.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0));
+
+ // Double catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), WASM_CATCH(0),
+ kExprEnd);
+
+ // Unexpected finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprFinally,
+ kExprEnd);
+}
+
+TEST_F(AstDecoderTest, TryFinally) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally, kExprEnd);
+
+  // Missing finally.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprEnd);
+
+ // Missing end.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally);
+
+ // Double finally.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally, kExprFinally,
+ kExprEnd);
+
+ // Unexpected catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprFinally,
+ kExprEnd);
+}
+
+TEST_F(AstDecoderTest, TryCatchFinally) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprFinally, kExprEnd);
+
+ // Missing catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprFinally,
+ kExprEnd);
+
+ // Double catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ WASM_CATCH(0), kExprFinally, kExprEnd);
+
+ // Missing finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprEnd);
+
+ // Double finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprFinally, kExprFinally, kExprEnd);
+
+ // Finally before catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprFinally,
+ WASM_CATCH(0), kExprEnd);
+
+  // Missing both catch and finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprEnd);
+
+ // Missing end.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprFinally);
+}
class WasmOpcodeLengthTest : public TestWithZone {
public:
@@ -1734,33 +1932,38 @@ class WasmOpcodeLengthTest : public TestWithZone {
TEST_F(WasmOpcodeLengthTest, Statements) {
EXPECT_LENGTH(1, kExprNop);
- EXPECT_LENGTH(2, kExprBlock);
- EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(1, kExprBlock);
+ EXPECT_LENGTH(1, kExprLoop);
EXPECT_LENGTH(1, kExprIf);
- EXPECT_LENGTH(1, kExprIfElse);
+ EXPECT_LENGTH(1, kExprElse);
+ EXPECT_LENGTH(1, kExprEnd);
EXPECT_LENGTH(1, kExprSelect);
- EXPECT_LENGTH(2, kExprBr);
- EXPECT_LENGTH(2, kExprBrIf);
+ EXPECT_LENGTH(3, kExprBr);
+ EXPECT_LENGTH(3, kExprBrIf);
+ EXPECT_LENGTH(1, kExprThrow);
+ EXPECT_LENGTH(1, kExprTryCatch);
+ EXPECT_LENGTH(1, kExprTryFinally);
+ EXPECT_LENGTH(1, kExprTryCatchFinally);
+ EXPECT_LENGTH(2, kExprCatch);
+ EXPECT_LENGTH(1, kExprFinally);
}
-
TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprI8Const);
EXPECT_LENGTH(5, kExprF32Const);
EXPECT_LENGTH(9, kExprF64Const);
EXPECT_LENGTH(2, kExprGetLocal);
EXPECT_LENGTH(2, kExprSetLocal);
- EXPECT_LENGTH(2, kExprLoadGlobal);
- EXPECT_LENGTH(2, kExprStoreGlobal);
- EXPECT_LENGTH(2, kExprCallFunction);
- EXPECT_LENGTH(2, kExprCallImport);
- EXPECT_LENGTH(2, kExprCallIndirect);
+ EXPECT_LENGTH(2, kExprGetGlobal);
+ EXPECT_LENGTH(2, kExprSetGlobal);
+ EXPECT_LENGTH(3, kExprCallFunction);
+ EXPECT_LENGTH(3, kExprCallImport);
+ EXPECT_LENGTH(3, kExprCallIndirect);
EXPECT_LENGTH(1, kExprIf);
- EXPECT_LENGTH(1, kExprIfElse);
- EXPECT_LENGTH(2, kExprBlock);
- EXPECT_LENGTH(2, kExprLoop);
- EXPECT_LENGTH(2, kExprBr);
- EXPECT_LENGTH(2, kExprBrIf);
+ EXPECT_LENGTH(1, kExprBlock);
+ EXPECT_LENGTH(1, kExprLoop);
+ EXPECT_LENGTH(3, kExprBr);
+ EXPECT_LENGTH(3, kExprBrIf);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
@@ -1784,11 +1987,11 @@ TEST_F(WasmOpcodeLengthTest, I64Const) {
}
TEST_F(WasmOpcodeLengthTest, VariableLength) {
- EXPECT_LENGTH_N(2, kExprLoadGlobal, U32V_1(1));
- EXPECT_LENGTH_N(3, kExprLoadGlobal, U32V_2(33));
- EXPECT_LENGTH_N(4, kExprLoadGlobal, U32V_3(44));
- EXPECT_LENGTH_N(5, kExprLoadGlobal, U32V_4(66));
- EXPECT_LENGTH_N(6, kExprLoadGlobal, U32V_5(77));
+ EXPECT_LENGTH_N(2, kExprGetGlobal, U32V_1(1));
+ EXPECT_LENGTH_N(3, kExprGetGlobal, U32V_2(33));
+ EXPECT_LENGTH_N(4, kExprGetGlobal, U32V_3(44));
+ EXPECT_LENGTH_N(5, kExprGetGlobal, U32V_4(66));
+ EXPECT_LENGTH_N(6, kExprGetGlobal, U32V_5(77));
}
TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
@@ -1818,13 +2021,11 @@ TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
EXPECT_LENGTH(3, kExprF64StoreMem);
}
-
TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
EXPECT_LENGTH(1, kExprMemorySize);
EXPECT_LENGTH(1, kExprGrowMemory);
}
-
TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI32Add);
EXPECT_LENGTH(1, kExprI32Sub);
@@ -1946,54 +2147,56 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI64ReinterpretF64);
}
-
class WasmOpcodeArityTest : public TestWithZone {
public:
WasmOpcodeArityTest() : TestWithZone() {}
- TestModuleEnv module;
- TestSignatures sigs;
};
-#define EXPECT_ARITY(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(expected, OpcodeArity(&module, sig, code, code + sizeof(code))); \
+#define EXPECT_ARITY(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeArity(code, code + sizeof(code))); \
}
TEST_F(WasmOpcodeArityTest, Control) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprNop);
EXPECT_ARITY(0, kExprBlock, 0);
- EXPECT_ARITY(1, kExprBlock, 1);
- EXPECT_ARITY(2, kExprBlock, 2);
- EXPECT_ARITY(5, kExprBlock, 5);
- EXPECT_ARITY(10, kExprBlock, 10);
+ EXPECT_ARITY(0, kExprBlock, 1);
+ EXPECT_ARITY(0, kExprBlock, 2);
+ EXPECT_ARITY(0, kExprBlock, 5);
+ EXPECT_ARITY(0, kExprBlock, 10);
EXPECT_ARITY(0, kExprLoop, 0);
- EXPECT_ARITY(1, kExprLoop, 1);
- EXPECT_ARITY(2, kExprLoop, 2);
- EXPECT_ARITY(7, kExprLoop, 7);
- EXPECT_ARITY(11, kExprLoop, 11);
+ EXPECT_ARITY(0, kExprLoop, 1);
+ EXPECT_ARITY(0, kExprLoop, 2);
+ EXPECT_ARITY(0, kExprLoop, 7);
+ EXPECT_ARITY(0, kExprLoop, 11);
- EXPECT_ARITY(2, kExprIf);
- EXPECT_ARITY(3, kExprIfElse);
EXPECT_ARITY(3, kExprSelect);
- EXPECT_ARITY(1, kExprBr);
- EXPECT_ARITY(2, kExprBrIf);
+ EXPECT_ARITY(0, kExprBr);
+ EXPECT_ARITY(1, kExprBrIf);
+ EXPECT_ARITY(1, kExprBrTable);
+
+ EXPECT_ARITY(1, kExprBr, ARITY_1);
+ EXPECT_ARITY(2, kExprBrIf, ARITY_1);
+ EXPECT_ARITY(2, kExprBrTable, ARITY_1);
{
- sig = sigs.v_v();
- EXPECT_ARITY(0, kExprReturn);
- sig = sigs.i_i();
- EXPECT_ARITY(1, kExprReturn);
+ EXPECT_ARITY(0, kExprReturn, ARITY_0);
+ EXPECT_ARITY(1, kExprReturn, ARITY_1);
}
-}
+ EXPECT_ARITY(0, kExprThrow);
+ EXPECT_ARITY(0, kExprTryCatch);
+ EXPECT_ARITY(0, kExprTryFinally);
+ EXPECT_ARITY(0, kExprTryCatchFinally);
+ EXPECT_ARITY(1, kExprCatch, 2);
+ EXPECT_ARITY(0, kExprFinally);
+}
TEST_F(WasmOpcodeArityTest, Misc) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprI8Const);
EXPECT_ARITY(0, kExprI32Const);
EXPECT_ARITY(0, kExprF32Const);
@@ -2001,45 +2204,41 @@ TEST_F(WasmOpcodeArityTest, Misc) {
EXPECT_ARITY(0, kExprF64Const);
EXPECT_ARITY(0, kExprGetLocal);
EXPECT_ARITY(1, kExprSetLocal);
- EXPECT_ARITY(0, kExprLoadGlobal);
- EXPECT_ARITY(1, kExprStoreGlobal);
+ EXPECT_ARITY(0, kExprGetGlobal);
+ EXPECT_ARITY(1, kExprSetGlobal);
}
-
TEST_F(WasmOpcodeArityTest, Calls) {
- module.AddFunction(sigs.i_ii());
- module.AddFunction(sigs.i_i());
-
- module.AddSignature(sigs.f_ff());
- module.AddSignature(sigs.i_d());
-
- module.AddImport(sigs.f_ff());
- module.AddImport(sigs.i_d());
-
{
- FunctionSig* sig = sigs.i_ii();
+ EXPECT_ARITY(2, kExprCallFunction, 2, 0);
+ EXPECT_ARITY(2, kExprCallImport, 2, 0);
+ EXPECT_ARITY(3, kExprCallIndirect, 2, 0);
- EXPECT_ARITY(2, kExprCallFunction, 0);
- EXPECT_ARITY(2, kExprCallImport, 0);
- EXPECT_ARITY(3, kExprCallIndirect, 0);
- EXPECT_ARITY(1, kExprBr);
- EXPECT_ARITY(2, kExprBrIf);
+ EXPECT_ARITY(1, kExprBr, ARITY_1);
+ EXPECT_ARITY(2, kExprBrIf, ARITY_1);
+ EXPECT_ARITY(2, kExprBrTable, ARITY_1);
+
+ EXPECT_ARITY(0, kExprBr, ARITY_0);
+ EXPECT_ARITY(1, kExprBrIf, ARITY_0);
+ EXPECT_ARITY(1, kExprBrTable, ARITY_0);
}
{
- FunctionSig* sig = sigs.v_v();
+ EXPECT_ARITY(1, kExprCallFunction, ARITY_1, 1);
+ EXPECT_ARITY(1, kExprCallImport, ARITY_1, 1);
+ EXPECT_ARITY(2, kExprCallIndirect, ARITY_1, 1);
+
+ EXPECT_ARITY(1, kExprBr, ARITY_1);
+ EXPECT_ARITY(2, kExprBrIf, ARITY_1);
+ EXPECT_ARITY(2, kExprBrTable, ARITY_1);
- EXPECT_ARITY(1, kExprCallFunction, 1);
- EXPECT_ARITY(1, kExprCallImport, 1);
- EXPECT_ARITY(2, kExprCallIndirect, 1);
- EXPECT_ARITY(1, kExprBr);
- EXPECT_ARITY(2, kExprBrIf);
+ EXPECT_ARITY(0, kExprBr, ARITY_0);
+ EXPECT_ARITY(1, kExprBrIf, ARITY_0);
+ EXPECT_ARITY(1, kExprBrTable, ARITY_0);
}
}
-
TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(1, kExprI32LoadMem8S);
EXPECT_ARITY(1, kExprI32LoadMem8U);
EXPECT_ARITY(1, kExprI32LoadMem16S);
@@ -2067,16 +2266,12 @@ TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
EXPECT_ARITY(2, kExprF64StoreMem);
}
-
TEST_F(WasmOpcodeArityTest, MiscMemExpressions) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprMemorySize);
EXPECT_ARITY(1, kExprGrowMemory);
}
-
TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(2, kExprI32Add);
EXPECT_ARITY(2, kExprI32Sub);
EXPECT_ARITY(2, kExprI32Mul);
@@ -2246,7 +2441,7 @@ TEST_F(LocalDeclDecoderTest, OneLocal) {
LocalTypeMap map = Expand(decls);
EXPECT_EQ(1, map.size());
- EXPECT_EQ(type, map.at(0));
+ EXPECT_EQ(type, map[0]);
}
}
@@ -2297,12 +2492,12 @@ TEST_F(LocalDeclDecoderTest, MixedLocals) {
TEST_F(LocalDeclDecoderTest, UseEncoder) {
const byte* data = nullptr;
const byte* end = nullptr;
- LocalDeclEncoder local_decls;
+ LocalDeclEncoder local_decls(zone());
local_decls.AddLocals(5, kAstF32);
local_decls.AddLocals(1337, kAstI32);
local_decls.AddLocals(212, kAstI64);
- local_decls.Prepend(&data, &end);
+ local_decls.Prepend(zone(), &data, &end);
AstLocalDecls decls(zone());
bool result = DecodeLocalDecls(decls, data, end);
@@ -2314,7 +2509,60 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kAstF32, 5);
pos = ExpectRun(map, pos, kAstI32, 1337);
pos = ExpectRun(map, pos, kAstI64, 212);
- delete[] data;
+}
+
+class BytecodeIteratorTest : public TestWithZone {};
+
+TEST_F(BytecodeIteratorTest, SimpleForeach) {
+ byte code[] = {WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO)};
+ BytecodeIterator iter(code, code + sizeof(code));
+ WasmOpcode expected[] = {kExprI8Const, kExprIf, kExprI8Const,
+ kExprElse, kExprI8Const, kExprEnd};
+ size_t pos = 0;
+ for (WasmOpcode opcode : iter) {
+ if (pos >= arraysize(expected)) {
+ EXPECT_TRUE(false);
+ break;
+ }
+ EXPECT_EQ(expected[pos++], opcode);
+ }
+ EXPECT_EQ(arraysize(expected), pos);
+}
+
+TEST_F(BytecodeIteratorTest, ForeachTwice) {
+ byte code[] = {WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO)};
+ BytecodeIterator iter(code, code + sizeof(code));
+ int count = 0;
+
+ count = 0;
+ for (WasmOpcode opcode : iter) {
+ USE(opcode);
+ count++;
+ }
+ EXPECT_EQ(6, count);
+
+ count = 0;
+ for (WasmOpcode opcode : iter) {
+ USE(opcode);
+ count++;
+ }
+ EXPECT_EQ(6, count);
+}
+
+TEST_F(BytecodeIteratorTest, WithAstDecls) {
+ byte code[] = {1, 1, kLocalI32, WASM_I8(9), WASM_I8(11)};
+ AstLocalDecls decls(zone());
+ BytecodeIterator iter(code, code + sizeof(code), &decls);
+
+ EXPECT_EQ(3, decls.decls_encoded_size);
+ EXPECT_EQ(3, iter.pc_offset());
+ EXPECT_TRUE(iter.has_next());
+ EXPECT_EQ(kExprI8Const, iter.current());
+ iter.next();
+ EXPECT_TRUE(iter.has_next());
+ EXPECT_EQ(kExprI8Const, iter.current());
+ iter.next();
+ EXPECT_FALSE(iter.has_next());
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
new file mode 100644
index 0000000000..2b67f12ef5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -0,0 +1,402 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+#include "src/v8.h"
+
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-macro-gen.h"
+
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define B1(a) kExprBlock, a, kExprEnd
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
+
+struct ExpectedTarget {
+ pc_t pc;
+ ControlTransfer expected;
+};
+
+// For nicer error messages.
+class ControlTransferMatcher : public MatcherInterface<const ControlTransfer&> {
+ public:
+ explicit ControlTransferMatcher(pc_t pc, const ControlTransfer& expected)
+ : pc_(pc), expected_(expected) {}
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "@" << pc_ << " {pcdiff = " << expected_.pcdiff
+ << ", spdiff = " << expected_.spdiff
+ << ", action = " << expected_.action << "}";
+ }
+
+ bool MatchAndExplain(const ControlTransfer& input,
+ MatchResultListener* listener) const override {
+ if (input.pcdiff != expected_.pcdiff || input.spdiff != expected_.spdiff ||
+ input.action != expected_.action) {
+ *listener << "@" << pc_ << " {pcdiff = " << input.pcdiff
+ << ", spdiff = " << input.spdiff
+ << ", action = " << input.action << "}";
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ pc_t pc_;
+ const ControlTransfer& expected_;
+};
+
+class ControlTransferTest : public TestWithZone {
+ public:
+ void CheckControlTransfers(const byte* start, const byte* end,
+ ExpectedTarget* expected_targets,
+ size_t num_targets) {
+ ControlTransferMap map =
+ WasmInterpreter::ComputeControlTransfersForTesting(zone(), start, end);
+ // Check all control targets in the map.
+ for (size_t i = 0; i < num_targets; i++) {
+ pc_t pc = expected_targets[i].pc;
+ auto it = map.find(pc);
+ if (it == map.end()) {
+ printf("expected control target @ +%zu\n", pc);
+ EXPECT_TRUE(false);
+ } else {
+ ControlTransfer& expected = expected_targets[i].expected;
+ ControlTransfer& target = it->second;
+ EXPECT_THAT(target,
+ MakeMatcher(new ControlTransferMatcher(pc, expected)));
+ }
+ }
+
+ // Check there are no other control targets.
+ for (pc_t pc = 0; start + pc < end; pc++) {
+ bool found = false;
+ for (size_t i = 0; i < num_targets; i++) {
+ if (expected_targets[i].pc == pc) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ if (map.find(pc) != map.end()) {
+ printf("expected no control @ +%zu\n", pc);
+ EXPECT_TRUE(false);
+ }
+ }
+ }
+};
+
+// Macro for simplifying tests below.
+#define EXPECT_TARGETS(...) \
+ do { \
+ ExpectedTarget pairs[] = {__VA_ARGS__}; \
+ CheckControlTransfers(code, code + sizeof(code), pairs, arraysize(pairs)); \
+ } while (false)
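+
+// Each entry passed to EXPECT_TARGETS reads {pc, {pcdiff, spdiff, action}}:
+// the control transfer recorded at bytecode offset pc must match the given
+// pcdiff/spdiff/action triple (compared via ControlTransferMatcher above).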
+
+TEST_F(ControlTransferTest, SimpleIf) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprEnd // @3
+ };
+ EXPECT_TARGETS({2, {2, 0, ControlTransfer::kPushVoid}}, // --
+ {3, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, SimpleIf1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleIf2) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprNop, // @4
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprElse, // @3
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({2, {2, 0, ControlTransfer::kNoAction}}, // --
+ {3, {2, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprElse, // @4
+ kExprNop, // @5
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
+ {4, {3, 1, ControlTransfer::kPopAndRepush}}, // --
+ {6, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, IfBr) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({2, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {3, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, IfBrElse) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprElse, // @6
+ kExprEnd // @7
+ };
+ EXPECT_TARGETS({2, {5, 0, ControlTransfer::kNoAction}}, // --
+ {3, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {2, 1, ControlTransfer::kPopAndRepush}}, // --
+ {7, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, IfElseBr) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprElse, // @4
+ kExprBr, // @5
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @8
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
+ {4, {5, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {8, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BlockEmpty) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprEnd // @1
+ };
+ EXPECT_TARGETS({1, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, Br0) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}},
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br1) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprNop, // @1
+ kExprNop, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({3, {4, 2, ControlTransfer::kPopAndRepush}}, // --
+ {6, {1, 3, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br0b) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprNop, // @4
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({1, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br0c) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprNop, // @4
+ kExprNop, // @5
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({1, {6, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {1, 3, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoop1) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {-1, 0, ControlTransfer::kNoAction}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoop2) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {-2, 1, ControlTransfer::kNoAction}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoopExit1) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 1, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoopExit2) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 1, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable0) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprI8Const, // @1
+ 0, // +1
+ kExprBrTable, // @3
+ ARITY_0, // +1
+ 0, // +1
+ U32_LE(0), // +4
+ kExprEnd // @10
+ };
+ EXPECT_TARGETS({3, {8, 0, ControlTransfer::kPushVoid}}, // --
+ {10, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable1) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprI8Const, // @1
+ 0, // +1
+ kExprBrTable, // @3
+ ARITY_0, // +1
+ 1, // +1
+ U32_LE(0), // +4
+ U32_LE(0), // +4
+ kExprEnd // @14
+ };
+ EXPECT_TARGETS({3, {12, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {11, 0, ControlTransfer::kPushVoid}}, // --
+ {14, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBlock, // @1
+ kExprI8Const, // @2
+ 0, // +1
+ kExprBrTable, // @4
+ ARITY_0, // +1
+ 2, // +1
+ U32_LE(0), // +4
+ U32_LE(0), // +4
+ U32_LE(1), // +4
+ kExprEnd, // @19
+      kExprEnd       // @20
+ };
+ EXPECT_TARGETS({4, {16, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {15, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {15, 0, ControlTransfer::kPushVoid}}, // --
+ {19, {1, 1, ControlTransfer::kPopAndRepush}}, // --
+ {20, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 11d68f161e..e298f0ba9f 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/test-utils.h"
+#include "src/objects-inl.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-macro-gen.h"
@@ -22,7 +23,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_u32v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -32,7 +33,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i32v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -42,7 +43,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_u64v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -52,7 +53,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i64v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -365,7 +366,7 @@ TEST_F(DecoderTest, ReadI32v_FiveByte) {
TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(0, length);
@@ -375,7 +376,7 @@ TEST_F(DecoderTest, ReadU32v_off_end1) {
TEST_F(DecoderTest, ReadU32v_off_end2) {
static const byte data[] = {U32V_2(1111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -386,7 +387,7 @@ TEST_F(DecoderTest, ReadU32v_off_end2) {
TEST_F(DecoderTest, ReadU32v_off_end3) {
static const byte data[] = {U32V_3(111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -397,7 +398,7 @@ TEST_F(DecoderTest, ReadU32v_off_end3) {
TEST_F(DecoderTest, ReadU32v_off_end4) {
static const byte data[] = {U32V_4(11111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -408,7 +409,7 @@ TEST_F(DecoderTest, ReadU32v_off_end4) {
TEST_F(DecoderTest, ReadU32v_off_end5) {
static const byte data[] = {U32V_5(111111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -420,7 +421,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 16; i++) {
data[4] = static_cast<byte>(i << 4);
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(5, length);
@@ -430,7 +431,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
@@ -440,7 +441,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
@@ -477,7 +478,7 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
// foreach buffer size 0...5
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
uint32_t result = decoder.checked_read_u32v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -533,7 +534,7 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int length;
+ unsigned length;
uint64_t result = decoder.checked_read_u64v(data, 0, &length);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
@@ -574,7 +575,7 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
// foreach buffer size 0...10
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
uint64_t result = decoder.checked_read_u64v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -616,7 +617,7 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
// foreach buffer size 0...10
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
int64_t result = decoder.checked_read_i64v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -634,7 +635,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 128; i++) {
data[9] = static_cast<byte>(i << 1);
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u64v(decoder.start(), 0, &length);
EXPECT_EQ(10, length);
@@ -644,7 +645,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
@@ -654,7 +655,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
diff --git a/deps/v8/test/unittests/wasm/encoder-unittest.cc b/deps/v8/test/unittests/wasm/encoder-unittest.cc
index 740c0540dc..47885e697d 100644
--- a/deps/v8/test/unittests/wasm/encoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/encoder-unittest.cc
@@ -9,6 +9,8 @@
#include "src/wasm/ast-decoder.h"
#include "src/wasm/encoder.h"
+#include "test/cctest/wasm/test-signatures.h"
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -17,202 +19,10 @@ class EncoderTest : public TestWithZone {
protected:
void AddLocal(WasmFunctionBuilder* f, LocalType type) {
uint16_t index = f->AddLocal(type);
- const std::vector<uint8_t>& out_index = UnsignedLEB128From(index);
- std::vector<uint8_t> code;
- code.push_back(kExprGetLocal);
- for (size_t i = 0; i < out_index.size(); i++) {
- code.push_back(out_index.at(i));
- }
- uint32_t local_indices[] = {1};
- f->EmitCode(&code[0], static_cast<uint32_t>(code.size()), local_indices, 1);
- }
-
- void CheckReadValue(uint8_t* leb_value, uint32_t expected_result,
- int expected_length,
- ReadUnsignedLEB128ErrorCode expected_error_code) {
- int length;
- uint32_t result;
- ReadUnsignedLEB128ErrorCode error_code =
- ReadUnsignedLEB128Operand(leb_value, leb_value + 5, &length, &result);
- CHECK_EQ(error_code, expected_error_code);
- if (error_code == 0) {
- CHECK_EQ(result, expected_result);
- CHECK_EQ(length, expected_length);
- }
- }
-
- void CheckWriteValue(uint32_t input, int length, uint8_t* vals) {
- const std::vector<uint8_t> result = UnsignedLEB128From(input);
- CHECK_EQ(result.size(), length);
- for (int i = 0; i < length; i++) {
- CHECK_EQ(result.at(i), vals[i]);
- }
+ f->EmitGetLocal(index);
}
};
-
-TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- uint16_t local_f32 = function->AddLocal(kAstF32);
- uint16_t param_float32 = function->AddParam(kAstF32);
- uint16_t local_i32 = function->AddLocal(kAstI32);
- uint16_t local_f64 = function->AddLocal(kAstF64);
- uint16_t local_i64 = function->AddLocal(kAstI64);
- uint16_t param_int32 = function->AddParam(kAstI32);
- uint16_t local_i32_2 = function->AddLocal(kAstI32);
-
- byte code[] = {kExprGetLocal, static_cast<uint8_t>(param_float32)};
- uint32_t local_indices[] = {1};
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(param_int32);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_i32);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_i32_2);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_i64);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_f32);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_f64);
- function->EmitCode(code, sizeof(code), local_indices, 1);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + f->HeaderSize();
- f->Serialize(buffer, &header, &body);
-}
-
-
-TEST_F(EncoderTest, Function_Builder_Indexing_Variable_Width) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- for (size_t i = 0; i < 128; i++) {
- AddLocal(function, kAstF32);
- }
- AddLocal(function, kAstI32);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + f->HeaderSize();
- f->Serialize(buffer, &header, &body);
- body = buffer + f->HeaderSize();
-}
-
-TEST_F(EncoderTest, Function_Builder_Block_Variable_Width) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- function->EmitWithVarInt(kExprBlock, 200);
- for (int i = 0; i < 200; ++i) {
- function->Emit(kExprNop);
- }
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- CHECK_EQ(f->BodySize(), 204);
-}
-
-TEST_F(EncoderTest, Function_Builder_EmitEditableVarIntImmediate) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- function->Emit(kExprLoop);
- uint32_t offset = function->EmitEditableVarIntImmediate();
- for (int i = 0; i < 200; ++i) {
- function->Emit(kExprNop);
- }
- function->EditVarIntImmediate(offset, 200);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- CHECK_EQ(f->BodySize(), 204);
-}
-
-TEST_F(EncoderTest, Function_Builder_EmitEditableVarIntImmediate_Locals) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- function->Emit(kExprBlock);
- uint32_t offset = function->EmitEditableVarIntImmediate();
- for (int i = 0; i < 200; ++i) {
- AddLocal(function, kAstI32);
- }
- function->EditVarIntImmediate(offset, 200);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + f->HeaderSize();
- f->Serialize(buffer, &header, &body);
- body = buffer + f->HeaderSize();
-
- CHECK_EQ(f->BodySize(), 479);
- const uint8_t varint200_low = (200 & 0x7f) | 0x80;
- const uint8_t varint200_high = (200 >> 7) & 0x7f;
- offset = 0;
- CHECK_EQ(body[offset++], 1); // Local decl count.
- CHECK_EQ(body[offset++], varint200_low);
- CHECK_EQ(body[offset++], varint200_high);
- CHECK_EQ(body[offset++], kLocalI32);
- CHECK_EQ(body[offset++], kExprBlock);
- CHECK_EQ(body[offset++], varint200_low);
- CHECK_EQ(body[offset++], varint200_high);
- // GetLocal with one-byte indices.
- for (int i = 0; i <= 127; ++i) {
- CHECK_EQ(body[offset++], kExprGetLocal);
- CHECK_EQ(body[offset++], i);
- }
- // GetLocal with two-byte indices.
- for (int i = 128; i < 200; ++i) {
- CHECK_EQ(body[offset++], kExprGetLocal);
- CHECK_EQ(body[offset++], (i & 0x7f) | 0x80);
- CHECK_EQ(body[offset++], (i >> 7) & 0x7f);
- }
- CHECK_EQ(offset, 479);
-}
-
-TEST_F(EncoderTest, LEB_Functions) {
- byte leb_value[5] = {0, 0, 0, 0, 0};
- CheckReadValue(leb_value, 0, 1, kNoError);
- CheckWriteValue(0, 1, leb_value);
- leb_value[0] = 23;
- CheckReadValue(leb_value, 23, 1, kNoError);
- CheckWriteValue(23, 1, leb_value);
- leb_value[0] = 0x80;
- leb_value[1] = 0x01;
- CheckReadValue(leb_value, 128, 2, kNoError);
- CheckWriteValue(128, 2, leb_value);
- leb_value[0] = 0x80;
- leb_value[1] = 0x80;
- leb_value[2] = 0x80;
- leb_value[3] = 0x80;
- leb_value[4] = 0x01;
- CheckReadValue(leb_value, 0x10000000, 5, kNoError);
- CheckWriteValue(0x10000000, 5, leb_value);
- leb_value[0] = 0x80;
- leb_value[1] = 0x80;
- leb_value[2] = 0x80;
- leb_value[3] = 0x80;
- leb_value[4] = 0x80;
- CheckReadValue(leb_value, -1, -1, kInvalidLEB128);
-}
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
new file mode 100644
index 0000000000..b9759332bb
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -0,0 +1,191 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/objects-inl.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/leb-helper.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class LEBHelperTest : public TestWithZone {};
+
+TEST_F(LEBHelperTest, sizeof_u32v) {
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(0));
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(1));
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(3));
+
+ for (uint32_t i = 4; i < 128; i++) {
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 7); i < (1 << 9); i++) {
+ EXPECT_EQ(2, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 14); i < (1 << 16); i += 33) {
+ EXPECT_EQ(3, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 21); i < (1 << 24); i += 33999) {
+ EXPECT_EQ(4, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 28); i < (1 << 31); i += 33997779) {
+ EXPECT_EQ(5, LEBHelper::sizeof_u32v(i));
+ }
+
+ EXPECT_EQ(5, LEBHelper::sizeof_u32v(0xFFFFFFFF));
+}
+
+TEST_F(LEBHelperTest, sizeof_i32v) {
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(0));
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(1));
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(3));
+
+ for (int32_t i = 0; i < (1 << 6); i++) {
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 6); i < (1 << 8); i++) {
+ EXPECT_EQ(2, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 13); i < (1 << 15); i += 31) {
+ EXPECT_EQ(3, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 20); i < (1 << 22); i += 31991) {
+ EXPECT_EQ(4, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 27); i < (1 << 29); i += 3199893) {
+ EXPECT_EQ(5, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 6); i <= 0; i++) {
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 13); i < -(1 << 6); i++) {
+ EXPECT_EQ(2, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 20); i < -(1 << 18); i += 11) {
+ EXPECT_EQ(3, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 27); i < -(1 << 25); i += 11999) {
+ EXPECT_EQ(4, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 30); i < -(1 << 28); i += 1199999) {
+ EXPECT_EQ(5, LEBHelper::sizeof_i32v(i));
+ }
+}
+
+#define DECLARE_ENCODE_DECODE_CHECKER(ctype, name) \
+ static void CheckEncodeDecode_##name(ctype val) { \
+ static const int kSize = 16; \
+ static byte buffer[kSize]; \
+ byte *ptr = buffer; \
+ LEBHelper::write_##name(&ptr, val); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), \
+ static_cast<size_t>(ptr - buffer)); \
+ Decoder decoder(buffer, buffer + kSize); \
+ unsigned length = 0; \
+ ctype result = decoder.checked_read_##name(buffer, 0, &length); \
+ EXPECT_EQ(val, result); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
+ }
+
+DECLARE_ENCODE_DECODE_CHECKER(int32_t, i32v)
+DECLARE_ENCODE_DECODE_CHECKER(uint32_t, u32v)
+DECLARE_ENCODE_DECODE_CHECKER(int64_t, i64v)
+DECLARE_ENCODE_DECODE_CHECKER(uint64_t, u64v)
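+
+// Each CheckEncodeDecode_<name> helper round-trips a single value: it is
+// written with LEBHelper::write_<name>, read back with
+// Decoder::checked_read_<name>, and the decoded value as well as the encoded
+// length (checked against LEBHelper::sizeof_<name>) must match.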
+
+TEST_F(LEBHelperTest, WriteAndDecode_u32v) {
+ CheckEncodeDecode_u32v(0);
+ CheckEncodeDecode_u32v(1);
+ CheckEncodeDecode_u32v(5);
+ CheckEncodeDecode_u32v(99);
+ CheckEncodeDecode_u32v(298);
+ CheckEncodeDecode_u32v(87348723);
+ CheckEncodeDecode_u32v(77777);
+
+ for (uint32_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_u32v(val);
+ }
+}
+
+TEST_F(LEBHelperTest, WriteAndDecode_i32v) {
+ CheckEncodeDecode_i32v(0);
+ CheckEncodeDecode_i32v(1);
+ CheckEncodeDecode_i32v(5);
+ CheckEncodeDecode_i32v(99);
+ CheckEncodeDecode_i32v(298);
+ CheckEncodeDecode_i32v(87348723);
+ CheckEncodeDecode_i32v(77777);
+
+ CheckEncodeDecode_i32v(-2);
+ CheckEncodeDecode_i32v(-4);
+ CheckEncodeDecode_i32v(-59);
+ CheckEncodeDecode_i32v(-288);
+ CheckEncodeDecode_i32v(-12608);
+ CheckEncodeDecode_i32v(-87328723);
+ CheckEncodeDecode_i32v(-77377);
+
+ for (uint32_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_i32v(bit_cast<int32_t>(val));
+ }
+
+ for (uint32_t val = 0xFFFFFF3B; val != 0; val = val << 1) {
+ CheckEncodeDecode_i32v(bit_cast<int32_t>(val));
+ }
+}
+
+TEST_F(LEBHelperTest, WriteAndDecode_u64v) {
+ CheckEncodeDecode_u64v(0);
+ CheckEncodeDecode_u64v(1);
+ CheckEncodeDecode_u64v(5);
+ CheckEncodeDecode_u64v(99);
+ CheckEncodeDecode_u64v(298);
+ CheckEncodeDecode_u64v(87348723);
+ CheckEncodeDecode_u64v(77777);
+
+ for (uint64_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_u64v(val);
+ }
+}
+
+TEST_F(LEBHelperTest, WriteAndDecode_i64v) {
+ CheckEncodeDecode_i64v(0);
+ CheckEncodeDecode_i64v(1);
+ CheckEncodeDecode_i64v(5);
+ CheckEncodeDecode_i64v(99);
+ CheckEncodeDecode_i64v(298);
+ CheckEncodeDecode_i64v(87348723);
+ CheckEncodeDecode_i64v(77777);
+
+ CheckEncodeDecode_i64v(-2);
+ CheckEncodeDecode_i64v(-4);
+ CheckEncodeDecode_i64v(-59);
+ CheckEncodeDecode_i64v(-288);
+ CheckEncodeDecode_i64v(-87648723);
+ CheckEncodeDecode_i64v(-77377);
+
+ for (uint64_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_i64v(bit_cast<int64_t>(val));
+ }
+
+ for (uint64_t val = 0xFFFFFFFFFFFFFF3B; val != 0; val = val << 1) {
+ CheckEncodeDecode_i64v(bit_cast<int64_t>(val));
+ }
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index e77c1cfff5..919ce8e234 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -32,14 +32,12 @@ class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
}
};
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Empty0) {
byte code[] = { 0 };
BitVector* assigned = Analyze(code, code);
CHECK_NULL(assigned);
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
byte code[] = {kExprLoop, 0};
for (int i = 0; i < 5; i++) {
@@ -51,11 +49,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
- byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i))};
+ byte code[] = {WASM_LOOP(WASM_SET_ZERO(i))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
CHECK_EQ(j == i, assigned->Contains(j));
@@ -63,11 +60,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
- byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i)), WASM_SET_ZERO(1)};
+ byte code[] = {WASM_LOOP(WASM_SET_ZERO(i)), WASM_SET_ZERO(1)};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
CHECK_EQ(j == i, assigned->Contains(j));
@@ -75,12 +71,11 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
for (int j = 0; j < 5; j++) {
- byte code[] = {WASM_LOOP(2, WASM_SET_ZERO(i), WASM_SET_ZERO(j))};
+ byte code[] = {WASM_LOOP(WASM_SET_ZERO(i), WASM_SET_ZERO(j))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int k = 0; k < assigned->length(); k++) {
bool expected = k == i || k == j;
@@ -90,12 +85,11 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
byte code[] = {WASM_LOOP(
- 1, WASM_IF_ELSE(WASM_SET_ZERO(0), WASM_SET_ZERO(i), WASM_SET_ZERO(1)))};
+ WASM_IF_ELSE(WASM_SET_ZERO(0), WASM_SET_ZERO(i), WASM_SET_ZERO(1)))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
bool expected = i == j || j == 0 || j == 1;
@@ -104,14 +98,12 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
}
}
-
static byte LEBByte(uint32_t val, byte which) {
byte b = (val >> (which * 7)) & 0x7F;
if (val >> ((which + 1) * 7)) b |= 0x80;
return b;
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
num_locals = 65000;
for (int i = 13; i < 65000; i = static_cast<int>(i * 1.5)) {
@@ -133,11 +125,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
num_locals = 3;
byte code[] = {
- WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_SET_ZERO(1)))),
+ WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_SET_ZERO(1)))),
WASM_SET_ZERO(0)};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -147,14 +138,13 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
num_locals = 5;
byte code[] = {
- WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0),
- WASM_BRV(0, WASM_SET_LOCAL(
- 3, WASM_I32_SUB(WASM_GET_LOCAL(0),
- WASM_I8(1)))))),
+ WASM_LOOP(WASM_IF(
+ WASM_GET_LOCAL(0),
+ WASM_BRV(0, WASM_SET_LOCAL(
+ 3, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1)))))),
WASM_GET_LOCAL(0)};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -164,33 +154,38 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
num_locals = 6;
const byte kIter = 0;
const byte kSum = 3;
byte code[] = {WASM_BLOCK(
- 3,
WASM_WHILE(
WASM_GET_LOCAL(kIter),
- WASM_BLOCK(2, WASM_SET_LOCAL(
- kSum, WASM_F32_ADD(
- WASM_GET_LOCAL(kSum),
- WASM_LOAD_MEM(MachineType::Float32(),
- WASM_GET_LOCAL(kIter)))),
- WASM_SET_LOCAL(kIter, WASM_I32_SUB(WASM_GET_LOCAL(kIter),
- WASM_I8(4))))),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(
+ kSum, WASM_F32_ADD(WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Float32(),
+ WASM_GET_LOCAL(kIter)))),
+ WASM_SET_LOCAL(kIter,
+ WASM_I32_SUB(WASM_GET_LOCAL(kIter), WASM_I8(4))))),
WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_GET_LOCAL(kSum)),
WASM_GET_LOCAL(kIter))};
- BitVector* assigned = Analyze(code + 2, code + arraysize(code));
+ BitVector* assigned = Analyze(code + 1, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
bool expected = j == kIter || j == kSum;
CHECK_EQ(expected, assigned->Contains(j));
}
}
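+
+// A deliberately malformed body: the analysis is expected to bail out and
+// return no assignment information (nullptr).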
+TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
+ byte code[] = {kExprLoop, kExprF32Neg, kExprBrTable, 0x0e, 'h', 'e',
+ 'l', 'l', 'o', ',', ' ', 'w',
+ 'o', 'r', 'l', 'd', '!'};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ CHECK_NULL(assigned);
+}
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 44e78653e3..5c9c47ba00 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/test-utils.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
@@ -13,18 +15,66 @@ namespace internal {
namespace wasm {
#define EMPTY_FUNCTION(sig_index) 0, SIG_INDEX(sig_index), U16_LE(0)
-#define EMPTY_FUNCTION_SIZE ((size_t)5)
+#define SIZEOF_EMPTY_FUNCTION ((size_t)5)
#define EMPTY_BODY 0
-#define EMPTY_BODY_SIZE ((size_t)1)
+#define SIZEOF_EMPTY_BODY ((size_t)1)
#define NOP_BODY 2, 0, kExprNop
-#define NOP_BODY_SIZE ((size_t)3)
-#define VOID_VOID_SIG 0, kLocalVoid
-#define VOID_VOID_SIG_SIZE ((size_t)2)
-#define INT_INT_SIG 1, kLocalI32, kLocalI32
-#define INT_INT_SIG_SIZE ((size_t)3)
+#define SIZEOF_NOP_BODY ((size_t)3)
-#define SECTION(NAME, EXTRA_SIZE) \
- U32V_1(WASM_SECTION_##NAME##_SIZE + (EXTRA_SIZE)), WASM_SECTION_##NAME
+#define SIG_ENTRY_i_i SIG_ENTRY_x_x(kLocalI32, kLocalI32)
+
+#define UNKNOWN_EMPTY_SECTION_NAME 1, '\0'
+#define UNKNOWN_SECTION_NAME 4, 'l', 'u', 'l', 'z'
+
+#define SECTION(NAME, EXTRA_SIZE) WASM_SECTION_##NAME, U32V_1(EXTRA_SIZE)
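+
+// A section is now emitted as its WASM_SECTION_##NAME marker followed by the
+// payload size as a single LEB byte (U32V_1), instead of size-first as before.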
+
+#define SIGNATURES_SECTION(count, ...) \
+ SECTION(SIGNATURES, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
+#define FUNCTION_SIGNATURES_SECTION(count, ...) \
+ SECTION(FUNCTION_SIGNATURES, 1 + (count)), U32V_1(count), __VA_ARGS__
+
+#define FOO_STRING 3, 'f', 'o', 'o'
+#define NO_LOCAL_NAMES 0
+
+#define EMPTY_SIGNATURES_SECTION SECTION(SIGNATURES, 1), 0
+#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(FUNCTION_SIGNATURES, 1), 0
+#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
+#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
+
+#define X1(...) __VA_ARGS__
+#define X2(...) __VA_ARGS__, __VA_ARGS__
+#define X3(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
+#define X4(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
+
+#define ONE_EMPTY_FUNCTION WASM_SECTION_FUNCTION_SIGNATURES, 1 + 1 * 1, 1, X1(0)
+
+#define TWO_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 2 * 1, 2, X2(0)
+
+#define THREE_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 3 * 1, 3, X3(0)
+
+#define FOUR_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 4 * 1, 4, X4(0)
+
+#define ONE_EMPTY_BODY \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 1 * (1 + SIZEOF_EMPTY_BODY), 1, \
+ X1(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define TWO_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 2 * (1 + SIZEOF_EMPTY_BODY), 2, \
+ X2(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define THREE_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 3 * (1 + SIZEOF_EMPTY_BODY), 3, \
+ X3(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define FOUR_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 4 * (1 + SIZEOF_EMPTY_BODY), 4, \
+ X4(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define SIGNATURES_SECTION_VOID_VOID \
+ SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v), 1, SIG_ENTRY_v_v
#define EXPECT_VERIFIES(data) \
do { \
@@ -49,6 +99,15 @@ namespace wasm {
} \
} while (false)
+#define EXPECT_OK(result) \
+ do { \
+ EXPECT_TRUE(result.ok()); \
+ if (!result.ok()) { \
+ if (result.val) delete result.val; \
+ return; \
+ } \
+ } while (false)
+
static size_t SizeOfVarInt(size_t value) {
size_t size = 0;
do {
@@ -66,7 +125,7 @@ struct LocalTypePair {
{kLocalF32, kAstF32},
{kLocalF64, kAstF64}};
-class WasmModuleVerifyTest : public TestWithZone {
+class WasmModuleVerifyTest : public TestWithIsolateAndZone {
public:
ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
// Add the WASM magic and version number automatically.
@@ -76,14 +135,14 @@ class WasmModuleVerifyTest : public TestWithZone {
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
memcpy(temp + sizeof(header), module_start, size);
- ModuleResult result = DecodeWasmModule(nullptr, zone(), temp, temp + total,
- false, kWasmOrigin);
+ ModuleResult result = DecodeWasmModule(isolate(), zone(), temp,
+ temp + total, false, kWasmOrigin);
delete[] temp;
return result;
}
ModuleResult DecodeModuleNoHeader(const byte* module_start,
const byte* module_end) {
- return DecodeWasmModule(nullptr, zone(), module_start, module_end, false,
+ return DecodeWasmModule(isolate(), zone(), module_start, module_end, false,
kWasmOrigin);
}
};
@@ -115,26 +174,26 @@ TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
TEST_F(WasmModuleVerifyTest, OneGlobal) {
static const byte data[] = {
- SECTION(GLOBALS, 7), // --
+ SECTION(GLOBALS, 5), // --
1,
NAME_LENGTH(1),
- 'g', // name
- kMemI32, // memory type
- 0, // exported
+ 'g', // name
+ kLocalI32, // local type
+ 0, // exported
};
{
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(1, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(1, global->name_length);
- EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(kAstI32, global->type);
EXPECT_EQ(0, global->offset);
EXPECT_FALSE(global->exported);
@@ -144,6 +203,20 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
+TEST_F(WasmModuleVerifyTest, Global_invalid_type) {
+ static const byte data[] = {
+ SECTION(GLOBALS, 5), // --
+ 1,
+ NAME_LENGTH(1),
+ 'g', // name
+      64,   // invalid local type
+ 0, // exported
+ };
+
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+}
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {
@@ -151,11 +224,10 @@ TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
0, // declare 0 globals
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.val) delete result.val;
}
-
static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
while (true) {
uint32_t next = val >> 7;
@@ -170,19 +242,17 @@ static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
}
}
-
TEST_F(WasmModuleVerifyTest, NGlobals) {
static const byte data[] = {
- NO_NAME, // name length
- kMemI32, // memory type
- 0, // exported
+ NO_NAME, // name length
+      kLocalF32,  // local type
+ 0, // exported
};
for (uint32_t i = 0; i < 1000000; i = i * 13 + 1) {
std::vector<byte> buffer;
- size_t size =
- WASM_SECTION_GLOBALS_SIZE + SizeOfVarInt(i) + i * sizeof(data);
- const byte globals[] = {U32V_5(size), WASM_SECTION_GLOBALS};
+ size_t size = SizeOfVarInt(i) + i * sizeof(data);
+ const byte globals[] = {WASM_SECTION_GLOBALS, U32V_5(size)};
for (size_t g = 0; g != sizeof(globals); ++g) {
buffer.push_back(globals[g]);
}
@@ -192,7 +262,7 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
}
ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.val) delete result.val;
}
}
@@ -221,38 +291,37 @@ TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
EXPECT_FAILURE(data);
}
-
TEST_F(WasmModuleVerifyTest, TwoGlobals) {
static const byte data[] = {
- SECTION(GLOBALS, 13),
+ SECTION(GLOBALS, 7),
2,
- NO_NAME, // #0: name length
- kMemF32, // memory type
- 0, // exported
- NO_NAME, // #1: name length
- kMemF64, // memory type
- 1, // exported
+ NO_NAME, // #0: name length
+ kLocalF32, // type
+ 0, // exported
+ NO_NAME, // #1: name length
+ kLocalF64, // type
+ 1, // exported
};
{
// Should decode to exactly two globals.
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(2, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* g0 = &result.val->globals[0];
- WasmGlobal* g1 = &result.val->globals[1];
+ const WasmGlobal* g0 = &result.val->globals[0];
+ const WasmGlobal* g1 = &result.val->globals[1];
EXPECT_EQ(0, g0->name_length);
- EXPECT_EQ(MachineType::Float32(), g0->type);
+ EXPECT_EQ(kAstF32, g0->type);
EXPECT_EQ(0, g0->offset);
EXPECT_FALSE(g0->exported);
EXPECT_EQ(0, g1->name_length);
- EXPECT_EQ(MachineType::Float64(), g1->type);
- EXPECT_EQ(0, g1->offset);
+ EXPECT_EQ(kAstF64, g1->type);
+ EXPECT_EQ(8, g1->offset);
EXPECT_TRUE(g1->exported);
if (result.val) delete result.val;
@@ -261,39 +330,31 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
-
TEST_F(WasmModuleVerifyTest, OneSignature) {
{
- static const byte data[] = {SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1,
- VOID_VOID_SIG};
+ static const byte data[] = {SIGNATURES_SECTION_VOID_VOID};
EXPECT_VERIFIES(data);
}
{
- static const byte data[] = {SECTION(SIGNATURES, 1 + INT_INT_SIG_SIZE), 1,
- INT_INT_SIG};
+ static const byte data[] = {SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_x_x),
+ 1, SIG_ENTRY_i_i};
EXPECT_VERIFIES(data);
}
}
-
TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
static const byte data[] = {
- SECTION(SIGNATURES, 10),
- 3,
- 0,
- kLocalVoid, // void -> void
- 1,
- kLocalI32,
- kLocalF32, // f32 -> i32
- 2,
- kLocalI32,
- kLocalF64,
- kLocalF64, // (f64,f64) -> i32
+ SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_x_x +
+ SIZEOF_SIG_ENTRY_x_xx), // --
+ 3, // --
+ SIG_ENTRY_v_v, // void -> void
+ SIG_ENTRY_x_x(kLocalI32, kLocalF32), // f32 -> i32
+ SIG_ENTRY_x_xx(kLocalI32, kLocalF64, kLocalF64), // f64,f64 -> i32
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(3, result.val->signatures.size());
if (result.val->signatures.size() == 3) {
EXPECT_EQ(0, result.val->signatures[0]->return_count());
@@ -309,262 +370,8 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
-
-TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
- static const byte data[] = {
- SECTION(FUNCTIONS, 25), 1,
- // func#0 ------------------------------------------------------
- SIG_INDEX(0), // signature index
- NO_NAME, // name length
- U32_LE(0), // code start offset
- U32_LE(0), // code end offset
- U16_LE(899), // local int32 count
- U16_LE(799), // local int64 count
- U16_LE(699), // local float32 count
- U16_LE(599), // local float64 count
- 0, // exported
- 1 // external
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
- const int kCodeStartOffset = 51;
- const int kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1,
- // sig#0 -------------------------------------------------------
- VOID_VOID_SIG,
- // func#0 ------------------------------------------------------
- SECTION(FUNCTIONS, 19), 1,
- kDeclFunctionLocals | kDeclFunctionExport | kDeclFunctionName,
- SIG_INDEX(0), // signature index
- NAME_LENGTH(2), 'h', 'i', // name
- U16_LE(1466), // local int32 count
- U16_LE(1355), // local int64 count
- U16_LE(1244), // local float32 count
- U16_LE(1133), // local float64 count
- 1, 0, // size
- kExprNop,
- };
-
- {
- // Should decode to exactly one function.
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(0, result.val->globals.size());
- EXPECT_EQ(1, result.val->signatures.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(0, result.val->data_segments.size());
- EXPECT_EQ(0, result.val->function_table.size());
-
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(39, function->name_offset);
- EXPECT_EQ(2, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_EQ(1466, function->local_i32_count);
- EXPECT_EQ(1355, function->local_i64_count);
- EXPECT_EQ(1244, function->local_f32_count);
- EXPECT_EQ(1133, function->local_f64_count);
-
- EXPECT_TRUE(function->exported);
- EXPECT_FALSE(function->external);
-
- if (result.val) delete result.val;
- }
-
- EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
- static const byte data[] = {
- SECTION(SIGNATURES, VOID_VOID_SIG_SIZE), 1,
- // sig#0 -------------------------------------------------------
- VOID_VOID_SIG, SECTION(FUNCTIONS, 6), 1,
- // func#0 ------------------------------------------------------
- kDeclFunctionImport, // no name, no locals, imported
- SIG_INDEX(0),
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(0, function->code_start_offset);
- EXPECT_EQ(0, function->code_end_offset);
-
- EXPECT_EQ(0, function->local_i32_count);
- EXPECT_EQ(0, function->local_i64_count);
- EXPECT_EQ(0, function->local_f32_count);
- EXPECT_EQ(0, function->local_f64_count);
-
- EXPECT_FALSE(function->exported);
- EXPECT_TRUE(function->external);
-
- if (result.val) delete result.val;
-}
-
-TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
- static const byte kCodeStartOffset = 40;
- static const byte kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SECTION(SIGNATURES, 3), 1,
- // sig#0 -------------------------------------------------------
- 0, 0, // void -> void
- SECTION(FUNCTIONS, 7), 1,
- // func#0 ------------------------------------------------------
- 0, // no name, no locals
- 0, 0, // signature index
- 1, 0, // body size
- kExprNop // body
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_EQ(0, function->local_i32_count);
- EXPECT_EQ(0, function->local_i64_count);
- EXPECT_EQ(0, function->local_f32_count);
- EXPECT_EQ(0, function->local_f64_count);
-
- EXPECT_FALSE(function->exported);
- EXPECT_FALSE(function->external);
-
- if (result.val) delete result.val;
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
- static const byte kCodeStartOffset = 48;
- static const byte kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SECTION(SIGNATURES, 3), 1,
- // sig#0 -------------------------------------------------------
- 0, 0, // void -> void
- SECTION(FUNCTIONS, 15), 1,
- // func#0 ------------------------------------------------------
- kDeclFunctionLocals, 0, 0, // signature index
- 1, 2, // local int32 count
- 3, 4, // local int64 count
- 5, 6, // local float32 count
- 7, 8, // local float64 count
- 1, 0, // body size
- kExprNop // body
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_EQ(513, function->local_i32_count);
- EXPECT_EQ(1027, function->local_i64_count);
- EXPECT_EQ(1541, function->local_f32_count);
- EXPECT_EQ(2055, function->local_f64_count);
-
- EXPECT_FALSE(function->exported);
- EXPECT_FALSE(function->external);
-
- if (result.val) delete result.val;
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
- static const byte kCodeStartOffset = 75;
- static const byte kCodeEndOffset = kCodeStartOffset + 3;
- static const byte kDataSegmentSourceOffset = kCodeEndOffset + 20;
-
- static const byte data[] = {
- SECTION(MEMORY, 3), 28, 28, 1,
- // global#0 --------------------------------------------------
- SECTION(GLOBALS, 7), 1,
- 0, // name length
- kMemU8, // memory type
- 0, // exported
- // sig#0 -----------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
- // func#0 ----------------------------------------------------
- SECTION(FUNCTIONS, 20), 1, kDeclFunctionLocals | kDeclFunctionName, 0,
- 0, // signature index
- 2, 'h', 'i', // name
- 1, 2, // local int32 count
- 3, 4, // local int64 count
- 5, 6, // local float32 count
- 7, 8, // local float64 count
- 3, 0, // body size
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- // segment#0 -------------------------------------------------
- SECTION(DATA_SEGMENTS, 14), 1,
- U32V_3(0x8b3ae), // dest addr
- U32V_1(5), // source size
- 0, 1, 2, 3, 4, // data bytes
- // rest ------------------------------------------------------
- SECTION(END, 0),
- };
-
- {
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->globals.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->data_segments.size());
-
- WasmGlobal* global = &result.val->globals.back();
-
- EXPECT_EQ(0, global->name_length);
- EXPECT_EQ(MachineType::Uint8(), global->type);
- EXPECT_EQ(0, global->offset);
- EXPECT_FALSE(global->exported);
-
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(63, function->name_offset);
- EXPECT_EQ(2, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_FALSE(function->exported);
- EXPECT_FALSE(function->external);
-
- WasmDataSegment* segment = &result.val->data_segments.back();
-
- EXPECT_EQ(0x8b3ae, segment->dest_addr);
- EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
- EXPECT_EQ(5, segment->source_size);
- EXPECT_TRUE(segment->init);
-
- if (result.val) delete result.val;
- }
-}
-
-
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
- const byte kDataSegmentSourceOffset = 39;
+ const byte kDataSegmentSourceOffset = 30;
const byte data[] = {
SECTION(MEMORY, 3),
28,
@@ -582,12 +389,12 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
{
EXPECT_VERIFIES(data);
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(0, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(1, result.val->data_segments.size());
- WasmDataSegment* segment = &result.val->data_segments.back();
+ const WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(0x9bbaa, segment->dest_addr);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
@@ -600,17 +407,16 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
EXPECT_OFF_END_FAILURE(data, 13, sizeof(data));
}
-
TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
- const byte kDataSegment0SourceOffset = 39;
- const byte kDataSegment1SourceOffset = 39 + 8;
+ const byte kDataSegment0SourceOffset = 30;
+ const byte kDataSegment1SourceOffset = 30 + 8;
const byte data[] = {
SECTION(MEMORY, 3),
28,
28,
1,
- SECTION(DATA_SEGMENTS, 31),
+ SECTION(DATA_SEGMENTS, 23),
2, // segment count
U32V_3(0x7ffee), // #0: dest addr
U32V_1(4), // source size
@@ -634,13 +440,13 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
{
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(0, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(2, result.val->data_segments.size());
- WasmDataSegment* s0 = &result.val->data_segments[0];
- WasmDataSegment* s1 = &result.val->data_segments[1];
+ const WasmDataSegment* s0 = &result.val->data_segments[0];
+ const WasmDataSegment* s1 = &result.val->data_segments[1];
EXPECT_EQ(0x7ffee, s0->dest_addr);
EXPECT_EQ(kDataSegment0SourceOffset, s0->source_offset);
@@ -670,7 +476,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
mem_pages,
mem_pages,
1,
- SECTION(DATA_SEGMENTS, 14),
+ SECTION(DATA_SEGMENTS, 8),
1,
U32V_3(dest_addr),
U32V_1(source_size),
@@ -687,41 +493,36 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
}
}
-
-// To make below tests for indirect calls much shorter.
-#define FUNCTION(sig_index, external) kDeclFunctionImport, SIG_INDEX(sig_index)
-
TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
- // func#0 ------------------------------------------------------
- SECTION(FUNCTIONS, 4), 1, FUNCTION(0, 0),
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ------------------------------------------------------
+ ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 2), 1, U32V_1(0)};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(1, result.val->signatures.size());
EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->function_table.size());
- EXPECT_EQ(0, result.val->function_table[0]);
+ EXPECT_EQ(1, result.val->function_tables.size());
+ EXPECT_EQ(1, result.val->function_tables[0].values.size());
+ EXPECT_EQ(0, result.val->function_tables[0].values[0]);
}
if (result.val) delete result.val;
}
-
TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 5), 2, 0, 0, // void -> void
- 0, kLocalI32, // void -> i32
- // func#0 ------------------------------------------------------
- SECTION(FUNCTIONS, 13), 4, FUNCTION(0, 1), // --
- FUNCTION(1, 1), // --
- FUNCTION(0, 1), // --
- FUNCTION(1, 1), // --
+ SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_v_x),
+ 2, // --
+ SIG_ENTRY_v_v, // void -> void
+ SIG_ENTRY_v_x(kLocalI32), // void -> i32
+ // funcs ------------------------------------------------------
+ FOUR_EMPTY_FUNCTIONS,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 9), 8,
U32V_1(0), // --
@@ -732,26 +533,26 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
U32V_1(1), // --
U32V_1(2), // --
U32V_1(3), // --
- };
+ FOUR_EMPTY_BODIES};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(2, result.val->signatures.size());
EXPECT_EQ(4, result.val->functions.size());
- EXPECT_EQ(8, result.val->function_table.size());
+ EXPECT_EQ(1, result.val->function_tables.size());
+ EXPECT_EQ(8, result.val->function_tables[0].values.size());
for (int i = 0; i < 8; i++) {
- EXPECT_EQ(i & 3, result.val->function_table[i]);
+ EXPECT_EQ(i & 3, result.val->function_tables[0].values[i]);
}
}
if (result.val) delete result.val;
}
-
TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
+ SIGNATURES_SECTION_VOID_VOID,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 3), 1, 0, 0,
};
@@ -759,13 +560,12 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
EXPECT_FAILURE(data);
}
-
TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
+ SIGNATURES_SECTION_VOID_VOID,
// functions ---------------------------------------------------
- SECTION(FUNCTIONS, 4), 1, FUNCTION(0, 1),
+ ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 3), 1, 1, 0,
};
@@ -773,12 +573,10 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
EXPECT_FAILURE(data);
}
-
class WasmSignatureDecodeTest : public TestWithZone {};
-
TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
- static const byte data[] = {0, 0};
+ static const byte data[] = {SIG_ENTRY_v_v};
base::AccountingAllocator allocator;
Zone zone(&allocator);
FunctionSig* sig =
@@ -789,11 +587,10 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
EXPECT_EQ(0, sig->return_count());
}
-
TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair ret_type = kLocalTypes[i];
- const byte data[] = {0, ret_type.code};
+ const byte data[] = {SIG_ENTRY_x(ret_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -804,11 +601,10 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair param_type = kLocalTypes[i];
- const byte data[] = {1, 0, param_type.code};
+ const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -819,15 +615,12 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair ret_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalTypePair param_type = kLocalTypes[j];
- const byte data[] = {1, // param count
- ret_type.code, // ret
- param_type.code}; // param
+ const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -840,16 +633,13 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair p0_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalTypePair p1_type = kLocalTypes[j];
- const byte data[] = {2, // param count
- kLocalI32, // ret
- p0_type.code, // p0
- p1_type.code}; // p1
+ const byte data[] = {
+ SIG_ENTRY_x_xx(kLocalI32, p0_type.code, p1_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -862,7 +652,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
byte data[256];
for (int p = 0; p <= 255; p = p + 1 + p * 3) {
@@ -877,11 +666,10 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte kInvalidType = 76;
- for (int i = 1; i < 3; i++) {
- byte data[] = {2, kLocalI32, kLocalI32, kLocalI32};
+ for (size_t i = 0; i < SIZEOF_SIG_ENTRY_x_xx; i++) {
+ byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
data[i] = kInvalidType;
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -889,47 +677,56 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
}
}
-
-TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type) {
- static const int kParamCount = 3;
- for (int i = 0; i < kParamCount; i++) {
- byte data[] = {kParamCount, kLocalI32, kLocalI32, kLocalI32, kLocalI32};
- data[i + 2] = kLocalVoid;
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
- EXPECT_EQ(nullptr, sig);
- }
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
+ static const byte data[] = {SIG_ENTRY_x_x(kLocalVoid, kLocalI32)};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
}
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
+ static const byte data[] = {SIG_ENTRY_x_x(kLocalI32, kLocalVoid)};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+}
-class WasmFunctionVerifyTest : public TestWithZone {};
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
+ static const byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalVoid)};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+}
+class WasmFunctionVerifyTest : public TestWithIsolateAndZone {};
TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
static const byte data[] = {
- 0, kLocalVoid, // signature
- 4, // locals
- 3, kLocalI32, // --
- 4, kLocalI64, // --
- 5, kLocalF32, // --
- 6, kLocalF64, // --
- kExprNop // body
+ SIG_ENTRY_v_v, // signature entry
+ 4, // locals
+ 3,
+ kLocalI32, // --
+ 4,
+ kLocalI64, // --
+ 5,
+ kLocalF32, // --
+ 6,
+ kLocalF64, // --
+ kExprNop // body
};
- FunctionResult result = DecodeWasmFunction(nullptr, zone(), nullptr, data,
+ FunctionResult result = DecodeWasmFunction(isolate(), zone(), nullptr, data,
data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.val && result.ok()) {
WasmFunction* function = result.val;
EXPECT_EQ(0, function->sig->parameter_count());
EXPECT_EQ(0, function->sig->return_count());
EXPECT_EQ(0, function->name_offset);
- EXPECT_EQ(2, function->code_start_offset);
+ EXPECT_EQ(SIZEOF_SIG_ENTRY_v_v, function->code_start_offset);
EXPECT_EQ(arraysize(data), function->code_end_offset);
// TODO(titzer): verify encoding of local declarations
- EXPECT_FALSE(function->external);
- EXPECT_FALSE(function->exported);
}
if (result.val) delete result.val;
@@ -942,47 +739,51 @@ TEST_F(WasmModuleVerifyTest, SectionWithoutNameLength) {
TEST_F(WasmModuleVerifyTest, TheLoneliestOfValidModulesTheTrulyEmptyOne) {
const byte data[] = {
- 1, // Section size.
0, // Empty section name.
// No section name, no content, nothing but sadness.
+ 0, // No section content.
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionEmpty) {
const byte data[] = {
- 5, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME, 0,
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionNonEmpty) {
const byte data[] = {
- 10, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
- // Section content:
- 0xff, 0xff, 0xff, 0xff, 0xff,
+ UNKNOWN_SECTION_NAME,
+ 5, // section size
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff, // section data
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, SignatureFollowedByEmptyUnknownSection) {
const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- 5, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME,
+ 0 // empty section
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- 10, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME,
+ 5, // section size
0xff, 0xff, 0xff, 0xff, 0xff,
};
EXPECT_VERIFIES(data);
@@ -990,29 +791,46 @@ TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSectionWithLongLEB) {
const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- 0x85, 0x80, 0x80, 0x80, 0x00, // Section size: 1 but in a 5-byte LEB.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME, 0x81, 0x80, 0x80, 0x80,
+ 0x00, // section size: 1 but in a 5-byte LEB
+ 0,
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionOverflow) {
static const byte data[] = {
- 13, // Section size.
- 1, // Section name length.
- '\0', // Section name.
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 9, // section size
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10, // 10 byte section
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionUnderflow) {
static const byte data[] = {
- 0xff, 0xff, 0xff, 0xff, 0x0f, // Section size LEB128 0xffffffff
- 1, '\0', // Section name and name length.
- 1, 2, 3, 4, // 4 byte section
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0x0f, // Section size LEB128 0xffffffff
+ 1,
+ 2,
+ 3,
+ 4, // 4 byte section
};
EXPECT_FAILURE(data);
}
@@ -1020,36 +838,42 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionUnderflow) {
TEST_F(WasmModuleVerifyTest, UnknownSectionLoop) {
  // Would loop forever when decoding if wrapping were allowed.
static const byte data[] = {
- 0xfa, 0xff, 0xff, 0xff, 0x0f, // Section size LEB128 0xfffffffa
- 1, '\0', // Section name and name length.
- 1, 2, 3, 4, // 4 byte section
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 1,
+ 2,
+ 3,
+ 4, // 4 byte section
+ 0xfa,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0x0f, // Section size LEB128 0xfffffffa
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
static const byte data[] = {
- 3, // Section size.
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 1, // section size
+ 0, // one byte section
+ SECTION(GLOBALS, 4),
1,
- '\0', // Section name: LEB128 1, string '\0'
- 0, // one byte section
- SECTION(GLOBALS, 7),
- 1,
- 0, // name length
- kMemI32, // memory type
- 0, // exported
+ 0, // name length
+ kLocalI32, // memory type
+ 0, // exported
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(1, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(0, global->name_length);
- EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(kAstI32, global->type);
EXPECT_EQ(0, global->offset);
EXPECT_FALSE(global->exported);
@@ -1062,26 +886,36 @@ TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
EXPECT_VERIFIES(data);
}
-TEST_F(WasmModuleVerifyTest, ImportTable_nosigs) {
+TEST_F(WasmModuleVerifyTest, ImportTable_nosigs1) {
static const byte data[] = {SECTION(IMPORT_TABLE, 1), 0};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_nosigs2) {
+ static const byte data[] = {
+ SECTION(IMPORT_TABLE, 6), 1, // sig table
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
+ };
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_sig) {
static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, SECTION(IMPORT_TABLE, 6), 1,
- IMPORT_SIG_INDEX(0), // sig index
- NAME_LENGTH(1), 'm', // module name
- NAME_LENGTH(1), 'f', // function name
+ SECTION(SIGNATURES, 1), 0, // --
+ SECTION(IMPORT_TABLE, 6), 1, // --
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
SECTION(IMPORT_TABLE, 6),
1, // --
IMPORT_SIG_INDEX(0), // sig index
@@ -1095,9 +929,8 @@ TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
SECTION(IMPORT_TABLE, 6),
1, // --
IMPORT_SIG_INDEX(0), // sig index
@@ -1110,9 +943,8 @@ TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
SECTION(IMPORT_TABLE, 6),
1,
IMPORT_SIG_INDEX(0), // sig index
@@ -1126,28 +958,33 @@ TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
- static const byte data[] = {SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1,
- EMPTY_FUNCTION(0),
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION,
SECTION(EXPORT_TABLE, 1),
- 0};
- EXPECT_VERIFIES(data);
+ 0, // --
+ ONE_EMPTY_BODY};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(0, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(FUNCTIONS, 1), 0,
- SECTION(EXPORT_TABLE, 1), 0};
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
+ };
// TODO(titzer): current behavior treats empty functions section as missing.
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions1) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(EXPORT_TABLE, 1), 0};
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
+ };
EXPECT_FAILURE(data);
}
@@ -1157,89 +994,88 @@ TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions2) {
}
TEST_F(WasmModuleVerifyTest, ExportTableOne) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1, // functions
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 7),
- 1, // exports
- FUNC_INDEX(0), // --
- NO_NAME // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 3),
+ 1, // exports
+ FUNC_INDEX(0), // --
+ NO_NAME, // --
+ ONE_EMPTY_BODY};
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(1, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1, // functions
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 12),
- 2, // exports
- FUNC_INDEX(0), // --
- NAME_LENGTH(4),
- 'n',
- 'a',
- 'm',
- 'e', // --
- FUNC_INDEX(0), // --
- NAME_LENGTH(3),
- 'n',
- 'o',
- 'm' // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 12),
+ 2, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(4),
+ 'n',
+ 'a',
+ 'm',
+ 'e', // --
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(3),
+ 'n',
+ 'o',
+ 'm', // --
+ ONE_EMPTY_BODY};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(2, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThree) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + 3 * EMPTY_FUNCTION_SIZE),
- 3, // functions
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 10),
- 3, // exports
- FUNC_INDEX(0), // --
- NAME_LENGTH(1),
- 'a', // --
- FUNC_INDEX(1), // --
- NAME_LENGTH(1),
- 'b', // --
- FUNC_INDEX(2), // --
- NAME_LENGTH(1),
- 'c' // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ THREE_EMPTY_FUNCTIONS,
+ SECTION(EXPORT_TABLE, 10),
+ 3, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(1),
+ 'a', // --
+ FUNC_INDEX(1), // --
+ NAME_LENGTH(1),
+ 'b', // --
+ FUNC_INDEX(2), // --
+ NAME_LENGTH(1),
+ 'c', // --
+ THREE_EMPTY_BODIES};
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(3, result.val->functions.size());
+ EXPECT_EQ(3, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
for (int i = 0; i < 6; i++) {
- const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + 3 * EMPTY_FUNCTION_SIZE),
- 3, // functions
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 5),
- 1, // exports
- FUNC_INDEX(i), // --
- NAME_LENGTH(2),
- 'e',
- 'x', // --
- };
+ const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ THREE_EMPTY_FUNCTIONS,
+ SECTION(EXPORT_TABLE, 5),
+ 1, // exports
+ FUNC_INDEX(i), // --
+ NAME_LENGTH(2),
+ 'e',
+ 'x', // --
+ THREE_EMPTY_BODIES};
if (i < 3) {
EXPECT_VERIFIES(data);
@@ -1251,12 +1087,9 @@ TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1, // functions
- EMPTY_FUNCTION(0), // --
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
SECTION(EXPORT_TABLE, 1 + 6),
1, // exports
FUNC_INDEX(0), // --
@@ -1270,67 +1103,68 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
}
}
-#define SIGNATURES_SECTION(count, ...) \
- SECTION(SIGNATURES, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
-#define FUNCTION_SIGNATURES_SECTION(count, ...) \
- SECTION(FUNCTION_SIGNATURES, 1 + (count)), U32V_1(count), __VA_ARGS__
-
-#define FOO_STRING 3, 'f', 'o', 'o'
-#define NO_LOCAL_NAMES 0
-
-#define EMPTY_SIGNATURES_SECTION SECTION(SIGNATURES, 1), 0
-#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(FUNCTION_SIGNATURES, 1), 0
-#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
-#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
-
TEST_F(WasmModuleVerifyTest, FunctionSignatures_empty) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(FUNCTION_SIGNATURES, 1), 0};
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1), 0, // --
+ SECTION(FUNCTION_SIGNATURES, 1), 0 // --
+ }; // --
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionSignatures_one) {
- static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
- FUNCTION_SIGNATURES_SECTION(1, 0)};
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0) // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
- static const byte data[] = {EMPTY_SIGNATURES_SECTION,
- EMPTY_FUNCTION_SIGNATURES_SECTION,
- EMPTY_FUNCTION_BODIES_SECTION};
+ static const byte data[] = {
+ EMPTY_SIGNATURES_SECTION, // --
+ EMPTY_FUNCTION_SIGNATURES_SECTION, // --
+ EMPTY_FUNCTION_BODIES_SECTION // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), FUNCTION_SIGNATURES_SECTION(1, 0),
- SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE), 1, EMPTY_BODY};
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY), 1, EMPTY_BODY // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_nop) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), FUNCTION_SIGNATURES_SECTION(1, 0),
- SECTION(FUNCTION_BODIES, 1 + NOP_BODY_SIZE), 1, NOP_BODY};
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_NOP_BODY), 1, NOP_BODY // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch1) {
- static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
- FUNCTION_SIGNATURES_SECTION(2, 0, 0),
- SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE), 1,
- EMPTY_BODY};
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY), 1, // --
+ EMPTY_BODY // --
+ };
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch2) {
- static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
- FUNCTION_SIGNATURES_SECTION(1, 0),
- SECTION(FUNCTION_BODIES, 1 + 2 * NOP_BODY_SIZE),
- 2,
- NOP_BODY,
- NOP_BODY};
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + 2 * SIZEOF_NOP_BODY), // --
+ 2, // --
+ NOP_BODY, // --
+ NOP_BODY // --
+ };
EXPECT_FAILURE(data);
}
@@ -1343,9 +1177,9 @@ TEST_F(WasmModuleVerifyTest, Names_empty) {
TEST_F(WasmModuleVerifyTest, Names_one_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE),
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY),
1,
EMPTY_BODY, // --
SECTION(NAMES, 1 + 5),
@@ -1358,9 +1192,9 @@ TEST_F(WasmModuleVerifyTest, Names_one_empty) {
TEST_F(WasmModuleVerifyTest, Names_two_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), // --
- FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
- SECTION(FUNCTION_BODIES, 1 + 2 * EMPTY_BODY_SIZE), // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + 2 * SIZEOF_EMPTY_BODY), // --
2,
EMPTY_BODY,
EMPTY_BODY, // --
diff --git a/deps/v8/test/unittests/wasm/switch-logic-unittest.cc b/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
new file mode 100644
index 0000000000..be587c28bd
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/switch-logic.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+class SwitchLogicTest : public TestWithZone {};
+
+void CheckNodeValues(CaseNode* node, int begin, int end) {
+ CHECK_EQ(node->begin, begin);
+ CHECK_EQ(node->end, end);
+}
+
+TEST_F(SwitchLogicTest, Single_Table_Test) {
+ ZoneVector<int> values(zone());
+ values.push_back(14);
+ values.push_back(12);
+ values.push_back(15);
+ values.push_back(19);
+ values.push_back(18);
+ values.push_back(16);
+ CaseNode* root = OrderCases(&values, zone());
+ CHECK_NULL(root->left);
+ CHECK_NULL(root->right);
+ CheckNodeValues(root, 12, 19);
+}
+
+TEST_F(SwitchLogicTest, Balanced_Tree_Test) {
+ ZoneVector<int> values(zone());
+ values.push_back(5);
+ values.push_back(1);
+ values.push_back(6);
+ values.push_back(9);
+ values.push_back(-4);
+ CaseNode* root = OrderCases(&values, zone());
+ CheckNodeValues(root, 5, 5);
+ CheckNodeValues(root->left, -4, -4);
+ CHECK_NULL(root->left->left);
+ CheckNodeValues(root->left->right, 1, 1);
+ CHECK_NULL(root->left->right->left);
+ CHECK_NULL(root->left->right->right);
+ CheckNodeValues(root->right, 6, 6);
+ CHECK_NULL(root->right->left);
+ CheckNodeValues(root->right->right, 9, 9);
+ CHECK_NULL(root->right->right->left);
+ CHECK_NULL(root->right->right->right);
+}
+
+TEST_F(SwitchLogicTest, Hybrid_Test) {
+ ZoneVector<int> values(zone());
+ values.push_back(1);
+ values.push_back(2);
+ values.push_back(3);
+ values.push_back(4);
+ values.push_back(7);
+ values.push_back(10);
+ values.push_back(11);
+ values.push_back(12);
+ values.push_back(13);
+ values.push_back(16);
+ CaseNode* root = OrderCases(&values, zone());
+ CheckNodeValues(root, 7, 7);
+ CheckNodeValues(root->left, 1, 4);
+ CheckNodeValues(root->right, 10, 13);
+ CheckNodeValues(root->right->right, 16, 16);
+}
+
+TEST_F(SwitchLogicTest, Single_Case) {
+ ZoneVector<int> values(zone());
+ values.push_back(3);
+ CaseNode* root = OrderCases(&values, zone());
+ CheckNodeValues(root, 3, 3);
+ CHECK_NULL(root->left);
+ CHECK_NULL(root->right);
+}
+
+TEST_F(SwitchLogicTest, Empty_Case) {
+ ZoneVector<int> values(zone());
+ CaseNode* root = OrderCases(&values, zone());
+ CHECK_NULL(root);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index ec188c00c9..2b782f5dc7 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -18,7 +18,6 @@ class WasmMacroGenTest : public TestWithZone {};
EXPECT_EQ(size, sizeof(code)); \
} while (false)
-
TEST_F(WasmMacroGenTest, Constants) {
EXPECT_SIZE(2, WASM_ONE);
EXPECT_SIZE(2, WASM_ZERO);
@@ -48,47 +47,44 @@ TEST_F(WasmMacroGenTest, Constants) {
EXPECT_SIZE(9, WASM_F64(-9818934.0));
}
-
TEST_F(WasmMacroGenTest, Statements) {
EXPECT_SIZE(1, WASM_NOP);
EXPECT_SIZE(4, WASM_SET_LOCAL(0, WASM_ZERO));
- EXPECT_SIZE(4, WASM_STORE_GLOBAL(0, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_SET_GLOBAL(0, WASM_ZERO));
EXPECT_SIZE(7, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(4, WASM_IF(WASM_ZERO, WASM_NOP));
+ EXPECT_SIZE(5, WASM_IF(WASM_ZERO, WASM_NOP));
- EXPECT_SIZE(5, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(7, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_SELECT(WASM_ZERO, WASM_NOP, WASM_NOP));
EXPECT_SIZE(3, WASM_BR(0));
EXPECT_SIZE(5, WASM_BR_IF(0, WASM_ZERO));
- EXPECT_SIZE(3, WASM_BLOCK(1, WASM_NOP));
- EXPECT_SIZE(4, WASM_BLOCK(2, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(3, WASM_BLOCK(WASM_NOP));
+ EXPECT_SIZE(4, WASM_BLOCK(WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_INFINITE_LOOP);
- EXPECT_SIZE(3, WASM_LOOP(1, WASM_NOP));
- EXPECT_SIZE(4, WASM_LOOP(2, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_LOOP(1, WASM_BR(0)));
- EXPECT_SIZE(7, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+ EXPECT_SIZE(3, WASM_LOOP(WASM_NOP));
+ EXPECT_SIZE(4, WASM_LOOP(WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_BR(0)));
+ EXPECT_SIZE(7, WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_SIZE(1, WASM_RETURN0);
- EXPECT_SIZE(3, WASM_RETURN(WASM_ZERO));
- EXPECT_SIZE(5, WASM_RETURN(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(2, WASM_RETURN0);
+ EXPECT_SIZE(4, WASM_RETURN1(WASM_ZERO));
EXPECT_SIZE(1, WASM_UNREACHABLE);
}
-
TEST_F(WasmMacroGenTest, MacroStatements) {
- EXPECT_SIZE(8, WASM_WHILE(WASM_I8(0), WASM_NOP));
+ EXPECT_SIZE(10, WASM_WHILE(WASM_I8(0), WASM_NOP));
EXPECT_SIZE(7, WASM_INC_LOCAL(0));
EXPECT_SIZE(7, WASM_INC_LOCAL_BY(0, 3));
@@ -97,63 +93,62 @@ TEST_F(WasmMacroGenTest, MacroStatements) {
}
TEST_F(WasmMacroGenTest, BrTable) {
- EXPECT_SIZE(8, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(1)));
+ EXPECT_SIZE(9, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(1)));
+ EXPECT_SIZE(11, WASM_BR_TABLEV(WASM_ZERO, WASM_ZERO, 1, BR_TARGET(1)));
}
-
TEST_F(WasmMacroGenTest, Expressions) {
EXPECT_SIZE(2, WASM_GET_LOCAL(0));
EXPECT_SIZE(2, WASM_GET_LOCAL(1));
EXPECT_SIZE(2, WASM_GET_LOCAL(12));
- EXPECT_SIZE(2, WASM_LOAD_GLOBAL(0));
- EXPECT_SIZE(2, WASM_LOAD_GLOBAL(1));
- EXPECT_SIZE(2, WASM_LOAD_GLOBAL(12));
+ EXPECT_SIZE(2, WASM_GET_GLOBAL(0));
+ EXPECT_SIZE(2, WASM_GET_GLOBAL(1));
+ EXPECT_SIZE(2, WASM_GET_GLOBAL(12));
EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO));
EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO));
EXPECT_SIZE(3, WASM_NOT(WASM_ZERO));
- EXPECT_SIZE(4, WASM_BRV(1, WASM_ZERO));
- EXPECT_SIZE(6, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_BRV(1, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(4, WASM_BLOCK(1, WASM_ZERO));
- EXPECT_SIZE(5, WASM_BLOCK(2, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(6, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_BLOCK(WASM_ZERO));
+ EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(4, WASM_LOOP(1, WASM_ZERO));
- EXPECT_SIZE(5, WASM_LOOP(2, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(6, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_LOOP(WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallFunction) {
- EXPECT_SIZE(2, WASM_CALL_FUNCTION0(0));
- EXPECT_SIZE(2, WASM_CALL_FUNCTION0(1));
- EXPECT_SIZE(2, WASM_CALL_FUNCTION0(11));
+ EXPECT_SIZE(3, WASM_CALL_FUNCTION0(0));
+ EXPECT_SIZE(3, WASM_CALL_FUNCTION0(1));
+ EXPECT_SIZE(3, WASM_CALL_FUNCTION0(11));
- EXPECT_SIZE(4, WASM_CALL_FUNCTION(0, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_FUNCTION(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_FUNCTION1(0, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_FUNCTION2(1, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallImport) {
- EXPECT_SIZE(2, WASM_CALL_IMPORT0(0));
- EXPECT_SIZE(2, WASM_CALL_IMPORT0(1));
- EXPECT_SIZE(2, WASM_CALL_IMPORT0(11));
+ EXPECT_SIZE(3, WASM_CALL_IMPORT0(0));
+ EXPECT_SIZE(3, WASM_CALL_IMPORT0(1));
+ EXPECT_SIZE(3, WASM_CALL_IMPORT0(11));
- EXPECT_SIZE(4, WASM_CALL_IMPORT(0, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_IMPORT(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_IMPORT1(0, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_IMPORT2(1, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallIndirect) {
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(1, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(11, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_INDIRECT(0, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(8, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_INDIRECT1(0, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(9, WASM_CALL_INDIRECT2(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Int32Ops) {
EXPECT_SIZE(5, WASM_I32_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I32_SUB(WASM_ZERO, WASM_ZERO));
@@ -189,7 +184,6 @@ TEST_F(WasmMacroGenTest, Int32Ops) {
EXPECT_SIZE(3, WASM_I32_EQZ(WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Int64Ops) {
EXPECT_SIZE(5, WASM_I64_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I64_SUB(WASM_ZERO, WASM_ZERO));
@@ -225,7 +219,6 @@ TEST_F(WasmMacroGenTest, Int64Ops) {
EXPECT_SIZE(3, WASM_I64_EQZ(WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Float32Ops) {
EXPECT_SIZE(5, WASM_F32_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_F32_SUB(WASM_ZERO, WASM_ZERO));
@@ -250,7 +243,6 @@ TEST_F(WasmMacroGenTest, Float32Ops) {
EXPECT_SIZE(5, WASM_F32_GE(WASM_ZERO, WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Float64Ops) {
EXPECT_SIZE(5, WASM_F64_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_F64_SUB(WASM_ZERO, WASM_ZERO));
@@ -275,7 +267,6 @@ TEST_F(WasmMacroGenTest, Float64Ops) {
EXPECT_SIZE(5, WASM_F64_GE(WASM_ZERO, WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Conversions) {
EXPECT_SIZE(3, WASM_I32_SCONVERT_F32(WASM_ZERO));
EXPECT_SIZE(3, WASM_I32_SCONVERT_F64(WASM_ZERO));
@@ -317,7 +308,6 @@ TEST_F(WasmMacroGenTest, LoadsAndStores) {
}
}
-
TEST_F(WasmMacroGenTest, LoadsAndStoresWithOffset) {
for (size_t i = 0; i < arraysize(kMemTypes); i++) {
EXPECT_SIZE(5, WASM_LOAD_MEM_OFFSET(kMemTypes[i], 11, WASM_ZERO));