author     Ben Noordhuis <info@bnoordhuis.nl>  2014-11-14 00:52:27 +0100
committer  Ben Noordhuis <info@bnoordhuis.nl>  2014-11-14 16:34:58 +0100
commit     5d1b6d3e0fa4b97a490ef964be48aed9872e3ec1 (patch)
tree       ab5f510c4d83b175681de629395525bf7ec7cedb /deps/v8/test/unittests
parent     3b3d89bad26f5dfebe73fef6ae284ee78acbd5c9 (diff)
deps: upgrade v8 to 3.30.37
Diffstat (limited to 'deps/v8/test/unittests')
-rw-r--r--  deps/v8/test/unittests/DEPS  4
-rw-r--r--  deps/v8/test/unittests/base/bits-unittest.cc  281
-rw-r--r--  deps/v8/test/unittests/base/cpu-unittest.cc  49
-rw-r--r--  deps/v8/test/unittests/base/division-by-constant-unittest.cc  134
-rw-r--r--  deps/v8/test/unittests/base/flags-unittest.cc  104
-rw-r--r--  deps/v8/test/unittests/base/functional-unittest.cc  196
-rw-r--r--  deps/v8/test/unittests/base/platform/condition-variable-unittest.cc  301
-rw-r--r--  deps/v8/test/unittests/base/platform/mutex-unittest.cc  91
-rw-r--r--  deps/v8/test/unittests/base/platform/platform-unittest.cc  110
-rw-r--r--  deps/v8/test/unittests/base/platform/semaphore-unittest.cc  145
-rw-r--r--  deps/v8/test/unittests/base/platform/time-unittest.cc  186
-rw-r--r--  deps/v8/test/unittests/base/sys-info-unittest.cc  32
-rw-r--r--  deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc  53
-rw-r--r--  deps/v8/test/unittests/char-predicates-unittest.cc  121
-rw-r--r--  deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc  2127
-rw-r--r--  deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc  2029
-rw-r--r--  deps/v8/test/unittests/compiler/change-lowering-unittest.cc  456
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-unittest.cc  292
-rw-r--r--  deps/v8/test/unittests/compiler/compiler-test-utils.h  57
-rw-r--r--  deps/v8/test/unittests/compiler/diamond-unittest.cc  161
-rw-r--r--  deps/v8/test/unittests/compiler/graph-reducer-unittest.cc  123
-rw-r--r--  deps/v8/test/unittests/compiler/graph-unittest.cc  98
-rw-r--r--  deps/v8/test/unittests/compiler/graph-unittest.h  79
-rw-r--r--  deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc  606
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-selector-unittest.cc  589
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-selector-unittest.h  241
-rw-r--r--  deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc  305
-rw-r--r--  deps/v8/test/unittests/compiler/js-operator-unittest.cc  147
-rw-r--r--  deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc  192
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc  1227
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-unittest.cc  314
-rw-r--r--  deps/v8/test/unittests/compiler/mips/OWNERS  5
-rw-r--r--  deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc  805
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.cc  1070
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.h  171
-rw-r--r--  deps/v8/test/unittests/compiler/register-allocator-unittest.cc  513
-rw-r--r--  deps/v8/test/unittests/compiler/select-lowering-unittest.cc  62
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc  603
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-unittest.cc  215
-rw-r--r--  deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc  130
-rw-r--r--  deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc  370
-rw-r--r--  deps/v8/test/unittests/compiler/zone-pool-unittest.cc  162
-rw-r--r--  deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc  396
-rw-r--r--  deps/v8/test/unittests/libplatform/default-platform-unittest.cc  43
-rw-r--r--  deps/v8/test/unittests/libplatform/task-queue-unittest.cc  60
-rw-r--r--  deps/v8/test/unittests/libplatform/worker-thread-unittest.cc  48
-rw-r--r--  deps/v8/test/unittests/run-all-unittests.cc  45
-rw-r--r--  deps/v8/test/unittests/test-utils.cc  104
-rw-r--r--  deps/v8/test/unittests/test-utils.h  108
-rw-r--r--  deps/v8/test/unittests/unittests.gyp  126
-rw-r--r--  deps/v8/test/unittests/unittests.status  6
51 files changed, 15892 insertions, 0 deletions
diff --git a/deps/v8/test/unittests/DEPS b/deps/v8/test/unittests/DEPS
new file mode 100644
index 0000000000..4df37f80af
--- /dev/null
+++ b/deps/v8/test/unittests/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+src",
+ "+testing"
+]
diff --git a/deps/v8/test/unittests/base/bits-unittest.cc b/deps/v8/test/unittests/base/bits-unittest.cc
new file mode 100644
index 0000000000..9caba8484e
--- /dev/null
+++ b/deps/v8/test/unittests/base/bits-unittest.cc
@@ -0,0 +1,281 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "testing/gtest-support.h"
+
+#ifdef DEBUG
+#define DISABLE_IN_RELEASE(Name) Name
+#else
+#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+TEST(Bits, CountPopulation32) {
+ EXPECT_EQ(0u, CountPopulation32(0));
+ EXPECT_EQ(1u, CountPopulation32(1));
+ EXPECT_EQ(8u, CountPopulation32(0x11111111));
+ EXPECT_EQ(16u, CountPopulation32(0xf0f0f0f0));
+ EXPECT_EQ(24u, CountPopulation32(0xfff0f0ff));
+ EXPECT_EQ(32u, CountPopulation32(0xffffffff));
+}
+
+
+TEST(Bits, CountPopulation64) {
+ EXPECT_EQ(0u, CountPopulation64(0));
+ EXPECT_EQ(1u, CountPopulation64(1));
+ EXPECT_EQ(2u, CountPopulation64(0x8000000000000001));
+ EXPECT_EQ(8u, CountPopulation64(0x11111111));
+ EXPECT_EQ(16u, CountPopulation64(0xf0f0f0f0));
+ EXPECT_EQ(24u, CountPopulation64(0xfff0f0ff));
+ EXPECT_EQ(32u, CountPopulation64(0xffffffff));
+ EXPECT_EQ(16u, CountPopulation64(0x1111111111111111));
+ EXPECT_EQ(32u, CountPopulation64(0xf0f0f0f0f0f0f0f0));
+ EXPECT_EQ(48u, CountPopulation64(0xfff0f0fffff0f0ff));
+ EXPECT_EQ(64u, CountPopulation64(0xffffffffffffffff));
+}
+
+
+TEST(Bits, CountLeadingZeros32) {
+ EXPECT_EQ(32u, CountLeadingZeros32(0));
+ EXPECT_EQ(31u, CountLeadingZeros32(1));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(31u - shift, CountLeadingZeros32(1u << shift));
+ }
+ EXPECT_EQ(4u, CountLeadingZeros32(0x0f0f0f0f));
+}
+
+
+TEST(Bits, CountLeadingZeros64) {
+ EXPECT_EQ(64u, CountLeadingZeros64(0));
+ EXPECT_EQ(63u, CountLeadingZeros64(1));
+ TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+ EXPECT_EQ(63u - shift, CountLeadingZeros64(V8_UINT64_C(1) << shift));
+ }
+ EXPECT_EQ(36u, CountLeadingZeros64(0x0f0f0f0f));
+ EXPECT_EQ(4u, CountLeadingZeros64(0x0f0f0f0f00000000));
+}
+
+
+TEST(Bits, CountTrailingZeros32) {
+ EXPECT_EQ(32u, CountTrailingZeros32(0));
+ EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(shift, CountTrailingZeros32(1u << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeros32(0xf0f0f0f0));
+}
+
+
+TEST(Bits, CountTrailingZeros64) {
+ EXPECT_EQ(64u, CountTrailingZeros64(0));
+ EXPECT_EQ(63u, CountTrailingZeros64(0x8000000000000000));
+ TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+ EXPECT_EQ(shift, CountTrailingZeros64(V8_UINT64_C(1) << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeros64(0xf0f0f0f0));
+ EXPECT_EQ(36u, CountTrailingZeros64(0xf0f0f0f000000000));
+}
+
+
+TEST(Bits, IsPowerOfTwo32) {
+ EXPECT_FALSE(IsPowerOfTwo32(0U));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_TRUE(IsPowerOfTwo32(1U << shift));
+ EXPECT_FALSE(IsPowerOfTwo32((1U << shift) + 5U));
+ EXPECT_FALSE(IsPowerOfTwo32(~(1U << shift)));
+ }
+ TRACED_FORRANGE(uint32_t, shift, 2, 31) {
+ EXPECT_FALSE(IsPowerOfTwo32((1U << shift) - 1U));
+ }
+ EXPECT_FALSE(IsPowerOfTwo32(0xffffffff));
+}
+
+
+TEST(Bits, IsPowerOfTwo64) {
+ EXPECT_FALSE(IsPowerOfTwo64(0U));
+ TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+ EXPECT_TRUE(IsPowerOfTwo64(V8_UINT64_C(1) << shift));
+ EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) + 5U));
+ EXPECT_FALSE(IsPowerOfTwo64(~(V8_UINT64_C(1) << shift)));
+ }
+ TRACED_FORRANGE(uint32_t, shift, 2, 63) {
+ EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) - 1U));
+ }
+ EXPECT_FALSE(IsPowerOfTwo64(V8_UINT64_C(0xffffffffffffffff)));
+}
+
+
+TEST(Bits, RoundUpToPowerOfTwo32) {
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(1u << shift, RoundUpToPowerOfTwo32(1u << shift));
+ }
+ EXPECT_EQ(0u, RoundUpToPowerOfTwo32(0));
+ EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
+ EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
+}
+
+
+TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo32)) {
+ ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo32(0x80000001u); },
+ "0x80000000");
+}
+
+
+TEST(Bits, RoundDownToPowerOfTwo32) {
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(1u << shift, RoundDownToPowerOfTwo32(1u << shift));
+ }
+ EXPECT_EQ(0u, RoundDownToPowerOfTwo32(0));
+ EXPECT_EQ(4u, RoundDownToPowerOfTwo32(5));
+ EXPECT_EQ(0x80000000u, RoundDownToPowerOfTwo32(0x80000001u));
+}
+
+
+TEST(Bits, RotateRight32) {
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(0u, RotateRight32(0u, shift));
+ }
+ EXPECT_EQ(1u, RotateRight32(1, 0));
+ EXPECT_EQ(1u, RotateRight32(2, 1));
+ EXPECT_EQ(0x80000000u, RotateRight32(1, 1));
+}
+
+
+TEST(Bits, RotateRight64) {
+ TRACED_FORRANGE(uint64_t, shift, 0, 63) {
+ EXPECT_EQ(0u, RotateRight64(0u, shift));
+ }
+ EXPECT_EQ(1u, RotateRight64(1, 0));
+ EXPECT_EQ(1u, RotateRight64(2, 1));
+ EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
+}
+
+
+TEST(Bits, SignedAddOverflow32) {
+ int32_t val = 0;
+ EXPECT_FALSE(SignedAddOverflow32(0, 0, &val));
+ EXPECT_EQ(0, val);
+ EXPECT_TRUE(
+ SignedAddOverflow32(std::numeric_limits<int32_t>::max(), 1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+ EXPECT_TRUE(
+ SignedAddOverflow32(std::numeric_limits<int32_t>::min(), -1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+ EXPECT_TRUE(SignedAddOverflow32(std::numeric_limits<int32_t>::max(),
+ std::numeric_limits<int32_t>::max(), &val));
+ EXPECT_EQ(-2, val);
+ TRACED_FORRANGE(int32_t, i, 1, 50) {
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_FALSE(SignedAddOverflow32(i, j, &val));
+ EXPECT_EQ(i + j, val);
+ }
+ }
+}
+
+
+TEST(Bits, SignedSubOverflow32) {
+ int32_t val = 0;
+ EXPECT_FALSE(SignedSubOverflow32(0, 0, &val));
+ EXPECT_EQ(0, val);
+ EXPECT_TRUE(
+ SignedSubOverflow32(std::numeric_limits<int32_t>::min(), 1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+ EXPECT_TRUE(
+ SignedSubOverflow32(std::numeric_limits<int32_t>::max(), -1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+ TRACED_FORRANGE(int32_t, i, 1, 50) {
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_FALSE(SignedSubOverflow32(i, j, &val));
+ EXPECT_EQ(i - j, val);
+ }
+ }
+}
+
+
+TEST(Bits, SignedMulHigh32) {
+ EXPECT_EQ(0, SignedMulHigh32(0, 0));
+ TRACED_FORRANGE(int32_t, i, 1, 50) {
+ TRACED_FORRANGE(int32_t, j, 1, i) { EXPECT_EQ(0, SignedMulHigh32(i, j)); }
+ }
+ EXPECT_EQ(-1073741824, SignedMulHigh32(std::numeric_limits<int32_t>::max(),
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_EQ(-1073741824, SignedMulHigh32(std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max()));
+ EXPECT_EQ(1, SignedMulHigh32(1024 * 1024 * 1024, 4));
+ EXPECT_EQ(2, SignedMulHigh32(8 * 1024, 1024 * 1024));
+}
+
+
+TEST(Bits, SignedMulHighAndAdd32) {
+ TRACED_FORRANGE(int32_t, i, 1, 50) {
+ EXPECT_EQ(i, SignedMulHighAndAdd32(0, 0, i));
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_EQ(i, SignedMulHighAndAdd32(j, j, i));
+ }
+ EXPECT_EQ(i + 1, SignedMulHighAndAdd32(1024 * 1024 * 1024, 4, i));
+ }
+}
+
+
+TEST(Bits, SignedDiv32) {
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(),
+ SignedDiv32(std::numeric_limits<int32_t>::min(), -1));
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(),
+ SignedDiv32(std::numeric_limits<int32_t>::max(), 1));
+ TRACED_FORRANGE(int32_t, i, 0, 50) {
+ EXPECT_EQ(0, SignedDiv32(i, 0));
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_EQ(1, SignedDiv32(j, j));
+ EXPECT_EQ(i / j, SignedDiv32(i, j));
+ EXPECT_EQ(-i / j, SignedDiv32(i, -j));
+ }
+ }
+}
+
+
+TEST(Bits, SignedMod32) {
+ EXPECT_EQ(0, SignedMod32(std::numeric_limits<int32_t>::min(), -1));
+ EXPECT_EQ(0, SignedMod32(std::numeric_limits<int32_t>::max(), 1));
+ TRACED_FORRANGE(int32_t, i, 0, 50) {
+ EXPECT_EQ(0, SignedMod32(i, 0));
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_EQ(0, SignedMod32(j, j));
+ EXPECT_EQ(i % j, SignedMod32(i, j));
+ EXPECT_EQ(i % j, SignedMod32(i, -j));
+ }
+ }
+}
+
+
+TEST(Bits, UnsignedDiv32) {
+ TRACED_FORRANGE(uint32_t, i, 0, 50) {
+ EXPECT_EQ(0u, UnsignedDiv32(i, 0));
+ TRACED_FORRANGE(uint32_t, j, i + 1, 100) {
+ EXPECT_EQ(1u, UnsignedDiv32(j, j));
+ EXPECT_EQ(i / j, UnsignedDiv32(i, j));
+ }
+ }
+}
+
+
+TEST(Bits, UnsignedMod32) {
+ TRACED_FORRANGE(uint32_t, i, 0, 50) {
+ EXPECT_EQ(0u, UnsignedMod32(i, 0));
+ TRACED_FORRANGE(uint32_t, j, i + 1, 100) {
+ EXPECT_EQ(0u, UnsignedMod32(j, j));
+ EXPECT_EQ(i % j, UnsignedMod32(i, j));
+ }
+ }
+}
+
+} // namespace bits
+} // namespace base
+} // namespace v8
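The RoundUpToPowerOfTwo32 expectations above (0 stays 0, 3 becomes 4, 0x7fffffff becomes 0x80000000, and powers of two map to themselves) match the classic bit-smearing idiom. The sketch below is for illustration only; it is not copied from V8's src/base/bits.h and the helper name is invented.

#include <stdint.h>

// Illustrative sketch of the behaviour the tests above expect, for inputs
// up to 0x80000000u (larger inputs are the death-test case).
static uint32_t RoundUpToPowerOfTwo32Sketch(uint32_t value) {
  value--;              // 0 wraps to 0xffffffff; powers of two drop just below themselves
  value |= value >> 1;  // smear the highest set bit into every lower position
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;     // 0xffffffff wraps back to 0; otherwise the next power of two
}

The death test runs only in debug builds (see DISABLE_IN_RELEASE above), since 0x80000001u has no 32-bit power-of-two ceiling to return.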
diff --git a/deps/v8/test/unittests/base/cpu-unittest.cc b/deps/v8/test/unittests/base/cpu-unittest.cc
new file mode 100644
index 0000000000..5c58f86238
--- /dev/null
+++ b/deps/v8/test/unittests/base/cpu-unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/cpu.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(CPUTest, FeatureImplications) {
+ CPU cpu;
+
+ // ia32 and x64 features
+ EXPECT_TRUE(!cpu.has_sse() || cpu.has_mmx());
+ EXPECT_TRUE(!cpu.has_sse2() || cpu.has_sse());
+ EXPECT_TRUE(!cpu.has_sse3() || cpu.has_sse2());
+ EXPECT_TRUE(!cpu.has_ssse3() || cpu.has_sse3());
+ EXPECT_TRUE(!cpu.has_sse41() || cpu.has_sse3());
+ EXPECT_TRUE(!cpu.has_sse42() || cpu.has_sse41());
+
+ // arm features
+ EXPECT_TRUE(!cpu.has_vfp3_d32() || cpu.has_vfp3());
+}
+
+
+TEST(CPUTest, RequiredFeatures) {
+ CPU cpu;
+
+#if V8_HOST_ARCH_ARM
+ EXPECT_TRUE(cpu.has_fpu());
+#endif
+
+#if V8_HOST_ARCH_IA32
+ EXPECT_TRUE(cpu.has_fpu());
+ EXPECT_TRUE(cpu.has_sahf());
+#endif
+
+#if V8_HOST_ARCH_X64
+ EXPECT_TRUE(cpu.has_fpu());
+ EXPECT_TRUE(cpu.has_cmov());
+ EXPECT_TRUE(cpu.has_mmx());
+ EXPECT_TRUE(cpu.has_sse());
+ EXPECT_TRUE(cpu.has_sse2());
+#endif
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/division-by-constant-unittest.cc b/deps/v8/test/unittests/base/division-by-constant-unittest.cc
new file mode 100644
index 0000000000..58816db79e
--- /dev/null
+++ b/deps/v8/test/unittests/base/division-by-constant-unittest.cc
@@ -0,0 +1,134 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check all examples from table 10-1 of "Hacker's Delight".
+
+#include "src/base/division-by-constant.h"
+
+#include <stdint.h>
+
+#include <ostream> // NOLINT
+
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+std::ostream& operator<<(std::ostream& os,
+ const MagicNumbersForDivision<T>& mag) {
+ return os << "{ multiplier: " << mag.multiplier << ", shift: " << mag.shift
+ << ", add: " << mag.add << " }";
+}
+
+
+// Some abbreviations...
+
+typedef MagicNumbersForDivision<uint32_t> M32;
+typedef MagicNumbersForDivision<uint64_t> M64;
+
+
+static M32 s32(int32_t d) {
+ return SignedDivisionByConstant<uint32_t>(static_cast<uint32_t>(d));
+}
+
+
+static M64 s64(int64_t d) {
+ return SignedDivisionByConstant<uint64_t>(static_cast<uint64_t>(d));
+}
+
+
+static M32 u32(uint32_t d) { return UnsignedDivisionByConstant<uint32_t>(d); }
+static M64 u64(uint64_t d) { return UnsignedDivisionByConstant<uint64_t>(d); }
+
+
+TEST(DivisionByConstant, Signed32) {
+ EXPECT_EQ(M32(0x99999999U, 1, false), s32(-5));
+ EXPECT_EQ(M32(0x55555555U, 1, false), s32(-3));
+ int32_t d = -1;
+ for (unsigned k = 1; k <= 32 - 1; ++k) {
+ d *= 2;
+ EXPECT_EQ(M32(0x7FFFFFFFU, k - 1, false), s32(d));
+ }
+ for (unsigned k = 1; k <= 32 - 2; ++k) {
+ EXPECT_EQ(M32(0x80000001U, k - 1, false), s32(1 << k));
+ }
+ EXPECT_EQ(M32(0x55555556U, 0, false), s32(3));
+ EXPECT_EQ(M32(0x66666667U, 1, false), s32(5));
+ EXPECT_EQ(M32(0x2AAAAAABU, 0, false), s32(6));
+ EXPECT_EQ(M32(0x92492493U, 2, false), s32(7));
+ EXPECT_EQ(M32(0x38E38E39U, 1, false), s32(9));
+ EXPECT_EQ(M32(0x66666667U, 2, false), s32(10));
+ EXPECT_EQ(M32(0x2E8BA2E9U, 1, false), s32(11));
+ EXPECT_EQ(M32(0x2AAAAAABU, 1, false), s32(12));
+ EXPECT_EQ(M32(0x51EB851FU, 3, false), s32(25));
+ EXPECT_EQ(M32(0x10624DD3U, 3, false), s32(125));
+ EXPECT_EQ(M32(0x68DB8BADU, 8, false), s32(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned32) {
+ EXPECT_EQ(M32(0x00000000U, 0, true), u32(1));
+ for (unsigned k = 1; k <= 30; ++k) {
+ EXPECT_EQ(M32(1U << (32 - k), 0, false), u32(1U << k));
+ }
+ EXPECT_EQ(M32(0xAAAAAAABU, 1, false), u32(3));
+ EXPECT_EQ(M32(0xCCCCCCCDU, 2, false), u32(5));
+ EXPECT_EQ(M32(0xAAAAAAABU, 2, false), u32(6));
+ EXPECT_EQ(M32(0x24924925U, 3, true), u32(7));
+ EXPECT_EQ(M32(0x38E38E39U, 1, false), u32(9));
+ EXPECT_EQ(M32(0xCCCCCCCDU, 3, false), u32(10));
+ EXPECT_EQ(M32(0xBA2E8BA3U, 3, false), u32(11));
+ EXPECT_EQ(M32(0xAAAAAAABU, 3, false), u32(12));
+ EXPECT_EQ(M32(0x51EB851FU, 3, false), u32(25));
+ EXPECT_EQ(M32(0x10624DD3U, 3, false), u32(125));
+ EXPECT_EQ(M32(0xD1B71759U, 9, false), u32(625));
+}
+
+
+TEST(DivisionByConstant, Signed64) {
+ EXPECT_EQ(M64(0x9999999999999999ULL, 1, false), s64(-5));
+ EXPECT_EQ(M64(0x5555555555555555ULL, 1, false), s64(-3));
+ int64_t d = -1;
+ for (unsigned k = 1; k <= 64 - 1; ++k) {
+ d *= 2;
+ EXPECT_EQ(M64(0x7FFFFFFFFFFFFFFFULL, k - 1, false), s64(d));
+ }
+ for (unsigned k = 1; k <= 64 - 2; ++k) {
+ EXPECT_EQ(M64(0x8000000000000001ULL, k - 1, false), s64(1LL << k));
+ }
+ EXPECT_EQ(M64(0x5555555555555556ULL, 0, false), s64(3));
+ EXPECT_EQ(M64(0x6666666666666667ULL, 1, false), s64(5));
+ EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 0, false), s64(6));
+ EXPECT_EQ(M64(0x4924924924924925ULL, 1, false), s64(7));
+ EXPECT_EQ(M64(0x1C71C71C71C71C72ULL, 0, false), s64(9));
+ EXPECT_EQ(M64(0x6666666666666667ULL, 2, false), s64(10));
+ EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), s64(11));
+ EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 1, false), s64(12));
+ EXPECT_EQ(M64(0xA3D70A3D70A3D70BULL, 4, false), s64(25));
+ EXPECT_EQ(M64(0x20C49BA5E353F7CFULL, 4, false), s64(125));
+ EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), s64(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned64) {
+ EXPECT_EQ(M64(0x0000000000000000ULL, 0, true), u64(1));
+ for (unsigned k = 1; k <= 64 - 2; ++k) {
+ EXPECT_EQ(M64(1ULL << (64 - k), 0, false), u64(1ULL << k));
+ }
+ EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 1, false), u64(3));
+ EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 2, false), u64(5));
+ EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 2, false), u64(6));
+ EXPECT_EQ(M64(0x2492492492492493ULL, 3, true), u64(7));
+ EXPECT_EQ(M64(0xE38E38E38E38E38FULL, 3, false), u64(9));
+ EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 3, false), u64(10));
+ EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), u64(11));
+ EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 3, false), u64(12));
+ EXPECT_EQ(M64(0x47AE147AE147AE15ULL, 5, true), u64(25));
+ EXPECT_EQ(M64(0x0624DD2F1A9FBE77ULL, 7, true), u64(125));
+ EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), u64(625));
+}
+
+} // namespace base
+} // namespace v8
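Each M32/M64 value above packs a (multiplier, shift, add) recipe from chapter 10 of "Hacker's Delight": division by a constant becomes a multiply-high followed by shifts, plus a small correction for some entries. Below is a minimal sketch of the unsigned 32-bit case with add == false, using an invented helper name; entries with add == true (such as u32(7)) need one extra add-and-shift step that is not shown.

#include <stdint.h>

// q = n / d computed as ((n * multiplier) >> 32) >> shift, which is valid
// for table entries whose add flag is false.
static uint32_t DivideByMagicSketch(uint32_t n, uint32_t multiplier,
                                    unsigned shift) {
  uint32_t hi = static_cast<uint32_t>(
      (static_cast<uint64_t>(n) * multiplier) >> 32);
  return hi >> shift;
}

// Consistent with the table entries tested above:
//   n / 3  == DivideByMagicSketch(n, 0xAAAAAAABu, 1)
//   n / 10 == DivideByMagicSketch(n, 0xCCCCCCCDu, 3)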
diff --git a/deps/v8/test/unittests/base/flags-unittest.cc b/deps/v8/test/unittests/base/flags-unittest.cc
new file mode 100644
index 0000000000..6f19399dc2
--- /dev/null
+++ b/deps/v8/test/unittests/base/flags-unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include "src/base/flags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+enum Flag1 {
+ kFlag1None = 0,
+ kFlag1First = 1u << 1,
+ kFlag1Second = 1u << 2,
+ kFlag1All = kFlag1None | kFlag1First | kFlag1Second
+};
+typedef Flags<Flag1> Flags1;
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Flags1)
+
+
+Flags1 bar(Flags1 flags1) { return flags1; }
+
+} // namespace
+
+
+TEST(FlagsTest, BasicOperations) {
+ Flags1 a;
+ EXPECT_EQ(kFlag1None, static_cast<int>(a));
+ a |= kFlag1First;
+ EXPECT_EQ(kFlag1First, static_cast<int>(a));
+ a = a | kFlag1Second;
+ EXPECT_EQ(kFlag1All, static_cast<int>(a));
+ a &= kFlag1Second;
+ EXPECT_EQ(kFlag1Second, static_cast<int>(a));
+ a = kFlag1None & a;
+ EXPECT_EQ(kFlag1None, static_cast<int>(a));
+ a ^= (kFlag1All | kFlag1None);
+ EXPECT_EQ(kFlag1All, static_cast<int>(a));
+ Flags1 b = ~a;
+ EXPECT_EQ(kFlag1All, static_cast<int>(a));
+ EXPECT_EQ(~static_cast<int>(a), static_cast<int>(b));
+ Flags1 c = a;
+ EXPECT_EQ(a, c);
+ EXPECT_NE(a, b);
+ EXPECT_EQ(a, bar(a));
+ EXPECT_EQ(a, bar(kFlag1All));
+}
+
+
+namespace {
+namespace foo {
+
+enum Option {
+ kNoOptions = 0,
+ kOption1 = 1,
+ kOption2 = 2,
+ kAllOptions = kNoOptions | kOption1 | kOption2
+};
+typedef Flags<Option> Options;
+
+} // namespace foo
+
+
+DEFINE_OPERATORS_FOR_FLAGS(foo::Options)
+
+} // namespace
+
+
+TEST(FlagsTest, NamespaceScope) {
+ foo::Options options;
+ options ^= foo::kNoOptions;
+ options |= foo::kOption1 | foo::kOption2;
+ EXPECT_EQ(foo::kAllOptions, static_cast<int>(options));
+}
+
+
+namespace {
+
+struct Foo {
+ enum Enum { kEnum1 = 1, kEnum2 = 2 };
+ typedef Flags<Enum, uint32_t> Enums;
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Foo::Enums)
+
+} // namespace
+
+
+TEST(FlagsTest, ClassScope) {
+ Foo::Enums enums;
+ enums |= Foo::kEnum1;
+ enums |= Foo::kEnum2;
+ EXPECT_TRUE(enums & Foo::kEnum1);
+ EXPECT_TRUE(enums & Foo::kEnum2);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
new file mode 100644
index 0000000000..97a27a438e
--- /dev/null
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -0,0 +1,196 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/functional.h"
+
+#include <limits>
+#include <set>
+
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace base {
+
+TEST(FunctionalTest, HashBool) {
+ hash<bool> h, h1, h2;
+ EXPECT_EQ(h1(true), h2(true));
+ EXPECT_EQ(h1(false), h2(false));
+ EXPECT_NE(h(true), h(false));
+}
+
+
+TEST(FunctionalTest, HashFloatZero) {
+ hash<float> h;
+ EXPECT_EQ(h(0.0f), h(-0.0f));
+}
+
+
+TEST(FunctionalTest, HashDoubleZero) {
+ hash<double> h;
+ EXPECT_EQ(h(0.0), h(-0.0));
+}
+
+
+template <typename T>
+class FunctionalTest : public TestWithRandomNumberGenerator {};
+
+typedef ::testing::Types<signed char, unsigned char,
+ short, // NOLINT(runtime/int)
+ unsigned short, // NOLINT(runtime/int)
+ int, unsigned int, long, // NOLINT(runtime/int)
+ unsigned long, // NOLINT(runtime/int)
+ long long, // NOLINT(runtime/int)
+ unsigned long long, // NOLINT(runtime/int)
+ int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t, float, double> FunctionalTypes;
+
+TYPED_TEST_CASE(FunctionalTest, FunctionalTypes);
+
+
+TYPED_TEST(FunctionalTest, EqualToImpliesSameHashCode) {
+ hash<TypeParam> h;
+ std::equal_to<TypeParam> e;
+ TypeParam values[32];
+ this->rng()->NextBytes(values, sizeof(values));
+ TRACED_FOREACH(TypeParam, v1, values) {
+ TRACED_FOREACH(TypeParam, v2, values) {
+ if (e(v1, v2)) EXPECT_EQ(h(v1), h(v2));
+ }
+ }
+}
+
+
+TYPED_TEST(FunctionalTest, HashEqualsHashValue) {
+ for (int i = 0; i < 128; ++i) {
+ TypeParam v;
+ this->rng()->NextBytes(&v, sizeof(v));
+ hash<TypeParam> h;
+ EXPECT_EQ(h(v), hash_value(v));
+ }
+}
+
+
+TYPED_TEST(FunctionalTest, HashIsStateless) {
+ hash<TypeParam> h1, h2;
+ for (int i = 0; i < 128; ++i) {
+ TypeParam v;
+ this->rng()->NextBytes(&v, sizeof(v));
+ EXPECT_EQ(h1(v), h2(v));
+ }
+}
+
+
+TYPED_TEST(FunctionalTest, HashIsOkish) {
+ std::set<TypeParam> vs;
+ for (size_t i = 0; i < 128; ++i) {
+ TypeParam v;
+ this->rng()->NextBytes(&v, sizeof(v));
+ vs.insert(v);
+ }
+ std::set<size_t> hs;
+ for (const auto& v : vs) {
+ hash<TypeParam> h;
+ hs.insert(h(v));
+ }
+ EXPECT_LE(vs.size() / 4u, hs.size());
+}
+
+
+TYPED_TEST(FunctionalTest, HashValueArrayUsesHashRange) {
+ TypeParam values[128];
+ this->rng()->NextBytes(&values, sizeof(values));
+ EXPECT_EQ(hash_range(values, values + arraysize(values)), hash_value(values));
+}
+
+
+TYPED_TEST(FunctionalTest, BitEqualTo) {
+ bit_equal_to<TypeParam> pred;
+ for (size_t i = 0; i < 128; ++i) {
+ TypeParam v1, v2;
+ this->rng()->NextBytes(&v1, sizeof(v1));
+ this->rng()->NextBytes(&v2, sizeof(v2));
+ EXPECT_PRED2(pred, v1, v1);
+ EXPECT_PRED2(pred, v2, v2);
+ EXPECT_EQ(memcmp(&v1, &v2, sizeof(TypeParam)) == 0, pred(v1, v2));
+ }
+}
+
+
+TYPED_TEST(FunctionalTest, BitEqualToImpliesSameBitHash) {
+ bit_hash<TypeParam> h;
+ bit_equal_to<TypeParam> e;
+ TypeParam values[32];
+ this->rng()->NextBytes(&values, sizeof(values));
+ TRACED_FOREACH(TypeParam, v1, values) {
+ TRACED_FOREACH(TypeParam, v2, values) {
+ if (e(v1, v2)) EXPECT_EQ(h(v1), h(v2));
+ }
+ }
+}
+
+
+namespace {
+
+struct Foo {
+ int x;
+ double y;
+};
+
+
+size_t hash_value(Foo const& v) { return hash_combine(v.x, v.y); }
+
+} // namespace
+
+
+TEST(FunctionalTest, HashUsesArgumentDependentLookup) {
+ const int kIntValues[] = {std::numeric_limits<int>::min(), -1, 0, 1, 42,
+ std::numeric_limits<int>::max()};
+ const double kDoubleValues[] = {
+ std::numeric_limits<double>::min(), -1, -0, 0, 1,
+ std::numeric_limits<double>::max()};
+ TRACED_FOREACH(int, x, kIntValues) {
+ TRACED_FOREACH(double, y, kDoubleValues) {
+ hash<Foo> h;
+ Foo foo = {x, y};
+ EXPECT_EQ(hash_combine(x, y), h(foo));
+ }
+ }
+}
+
+
+TEST(FunctionalTest, BitEqualToFloat) {
+ bit_equal_to<float> pred;
+ EXPECT_FALSE(pred(0.0f, -0.0f));
+ EXPECT_FALSE(pred(-0.0f, 0.0f));
+ float const qNaN = std::numeric_limits<float>::quiet_NaN();
+ float const sNaN = std::numeric_limits<float>::signaling_NaN();
+ EXPECT_PRED2(pred, qNaN, qNaN);
+ EXPECT_PRED2(pred, sNaN, sNaN);
+}
+
+
+TEST(FunctionalTest, BitHashFloatDifferentForZeroAndMinusZero) {
+ bit_hash<float> h;
+ EXPECT_NE(h(0.0f), h(-0.0f));
+}
+
+
+TEST(FunctionalTest, BitEqualToDouble) {
+ bit_equal_to<double> pred;
+ EXPECT_FALSE(pred(0.0, -0.0));
+ EXPECT_FALSE(pred(-0.0, 0.0));
+ double const qNaN = std::numeric_limits<double>::quiet_NaN();
+ double const sNaN = std::numeric_limits<double>::signaling_NaN();
+ EXPECT_PRED2(pred, qNaN, qNaN);
+ EXPECT_PRED2(pred, sNaN, sNaN);
+}
+
+
+TEST(FunctionalTest, BitHashDoubleDifferentForZeroAndMinusZero) {
+ bit_hash<double> h;
+ EXPECT_NE(h(0.0), h(-0.0));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
new file mode 100644
index 0000000000..fe0ad2ade8
--- /dev/null
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -0,0 +1,301 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/condition-variable.h"
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(ConditionVariable, WaitForAfterNotifyOnSameThread) {
+ for (int n = 0; n < 10; ++n) {
+ Mutex mutex;
+ ConditionVariable cv;
+
+ LockGuard<Mutex> lock_guard(&mutex);
+
+ cv.NotifyOne();
+ EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+
+ cv.NotifyAll();
+ EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+ }
+}
+
+
+namespace {
+
+class ThreadWithMutexAndConditionVariable FINAL : public Thread {
+ public:
+ ThreadWithMutexAndConditionVariable()
+ : Thread(Options("ThreadWithMutexAndConditionVariable")),
+ running_(false),
+ finished_(false) {}
+ virtual ~ThreadWithMutexAndConditionVariable() {}
+
+ virtual void Run() OVERRIDE {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ running_ = true;
+ cv_.NotifyOne();
+ while (running_) {
+ cv_.Wait(&mutex_);
+ }
+ finished_ = true;
+ cv_.NotifyAll();
+ }
+
+ bool running_;
+ bool finished_;
+ ConditionVariable cv_;
+ Mutex mutex_;
+};
+
+} // namespace
+
+
+TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
+ static const int kThreadCount = 128;
+ ThreadWithMutexAndConditionVariable threads[kThreadCount];
+
+ for (int n = 0; n < kThreadCount; ++n) {
+ LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ EXPECT_FALSE(threads[n].running_);
+ EXPECT_FALSE(threads[n].finished_);
+ threads[n].Start();
+ // Wait for nth thread to start.
+ while (!threads[n].running_) {
+ threads[n].cv_.Wait(&threads[n].mutex_);
+ }
+ }
+
+ for (int n = kThreadCount - 1; n >= 0; --n) {
+ LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ EXPECT_TRUE(threads[n].running_);
+ EXPECT_FALSE(threads[n].finished_);
+ }
+
+ for (int n = 0; n < kThreadCount; ++n) {
+ LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ EXPECT_TRUE(threads[n].running_);
+ EXPECT_FALSE(threads[n].finished_);
+ // Tell the nth thread to quit.
+ threads[n].running_ = false;
+ threads[n].cv_.NotifyOne();
+ }
+
+ for (int n = kThreadCount - 1; n >= 0; --n) {
+ // Wait for nth thread to quit.
+ LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ while (!threads[n].finished_) {
+ threads[n].cv_.Wait(&threads[n].mutex_);
+ }
+ EXPECT_FALSE(threads[n].running_);
+ EXPECT_TRUE(threads[n].finished_);
+ }
+
+ for (int n = 0; n < kThreadCount; ++n) {
+ threads[n].Join();
+ LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ EXPECT_FALSE(threads[n].running_);
+ EXPECT_TRUE(threads[n].finished_);
+ }
+}
+
+
+namespace {
+
+class ThreadWithSharedMutexAndConditionVariable FINAL : public Thread {
+ public:
+ ThreadWithSharedMutexAndConditionVariable()
+ : Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
+ running_(false),
+ finished_(false),
+ cv_(NULL),
+ mutex_(NULL) {}
+ virtual ~ThreadWithSharedMutexAndConditionVariable() {}
+
+ virtual void Run() OVERRIDE {
+ LockGuard<Mutex> lock_guard(mutex_);
+ running_ = true;
+ cv_->NotifyAll();
+ while (running_) {
+ cv_->Wait(mutex_);
+ }
+ finished_ = true;
+ cv_->NotifyAll();
+ }
+
+ bool running_;
+ bool finished_;
+ ConditionVariable* cv_;
+ Mutex* mutex_;
+};
+
+} // namespace
+
+
+TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
+ static const int kThreadCount = 128;
+ ThreadWithSharedMutexAndConditionVariable threads[kThreadCount];
+ ConditionVariable cv;
+ Mutex mutex;
+
+ for (int n = 0; n < kThreadCount; ++n) {
+ threads[n].mutex_ = &mutex;
+ threads[n].cv_ = &cv;
+ }
+
+ // Start all threads.
+ {
+ LockGuard<Mutex> lock_guard(&mutex);
+ for (int n = 0; n < kThreadCount; ++n) {
+ EXPECT_FALSE(threads[n].running_);
+ EXPECT_FALSE(threads[n].finished_);
+ threads[n].Start();
+ }
+ }
+
+ // Wait for all threads to start.
+ {
+ LockGuard<Mutex> lock_guard(&mutex);
+ for (int n = kThreadCount - 1; n >= 0; --n) {
+ while (!threads[n].running_) {
+ cv.Wait(&mutex);
+ }
+ }
+ }
+
+ // Make sure that all threads are running.
+ {
+ LockGuard<Mutex> lock_guard(&mutex);
+ for (int n = 0; n < kThreadCount; ++n) {
+ EXPECT_TRUE(threads[n].running_);
+ EXPECT_FALSE(threads[n].finished_);
+ }
+ }
+
+ // Tell all threads to quit.
+ {
+ LockGuard<Mutex> lock_guard(&mutex);
+ for (int n = kThreadCount - 1; n >= 0; --n) {
+ EXPECT_TRUE(threads[n].running_);
+ EXPECT_FALSE(threads[n].finished_);
+ // Tell the nth thread to quit.
+ threads[n].running_ = false;
+ }
+ cv.NotifyAll();
+ }
+
+ // Wait for all threads to quit.
+ {
+ LockGuard<Mutex> lock_guard(&mutex);
+ for (int n = 0; n < kThreadCount; ++n) {
+ while (!threads[n].finished_) {
+ cv.Wait(&mutex);
+ }
+ }
+ }
+
+ // Make sure all threads are finished.
+ {
+ LockGuard<Mutex> lock_guard(&mutex);
+ for (int n = kThreadCount - 1; n >= 0; --n) {
+ EXPECT_FALSE(threads[n].running_);
+ EXPECT_TRUE(threads[n].finished_);
+ }
+ }
+
+ // Join all threads.
+ for (int n = 0; n < kThreadCount; ++n) {
+ threads[n].Join();
+ }
+}
+
+
+namespace {
+
+class LoopIncrementThread FINAL : public Thread {
+ public:
+ LoopIncrementThread(int rem, int* counter, int limit, int thread_count,
+ ConditionVariable* cv, Mutex* mutex)
+ : Thread(Options("LoopIncrementThread")),
+ rem_(rem),
+ counter_(counter),
+ limit_(limit),
+ thread_count_(thread_count),
+ cv_(cv),
+ mutex_(mutex) {
+ EXPECT_LT(rem, thread_count);
+ EXPECT_EQ(0, limit % thread_count);
+ }
+
+ virtual void Run() OVERRIDE {
+ int last_count = -1;
+ while (true) {
+ LockGuard<Mutex> lock_guard(mutex_);
+ int count = *counter_;
+ while (count % thread_count_ != rem_ && count < limit_) {
+ cv_->Wait(mutex_);
+ count = *counter_;
+ }
+ if (count >= limit_) break;
+ EXPECT_EQ(*counter_, count);
+ if (last_count != -1) {
+ EXPECT_EQ(last_count + (thread_count_ - 1), count);
+ }
+ count++;
+ *counter_ = count;
+ last_count = count;
+ cv_->NotifyAll();
+ }
+ }
+
+ private:
+ const int rem_;
+ int* counter_;
+ const int limit_;
+ const int thread_count_;
+ ConditionVariable* cv_;
+ Mutex* mutex_;
+};
+
+} // namespace
+
+
+TEST(ConditionVariable, LoopIncrement) {
+ static const int kMaxThreadCount = 16;
+ Mutex mutex;
+ ConditionVariable cv;
+ for (int thread_count = 1; thread_count < kMaxThreadCount; ++thread_count) {
+ int limit = thread_count * 10;
+ int counter = 0;
+
+ // Setup the threads.
+ Thread** threads = new Thread* [thread_count];
+ for (int n = 0; n < thread_count; ++n) {
+ threads[n] = new LoopIncrementThread(n, &counter, limit, thread_count,
+ &cv, &mutex);
+ }
+
+ // Start all threads.
+ for (int n = thread_count - 1; n >= 0; --n) {
+ threads[n]->Start();
+ }
+
+ // Join and cleanup all threads.
+ for (int n = 0; n < thread_count; ++n) {
+ threads[n]->Join();
+ delete threads[n];
+ }
+ delete[] threads;
+
+ EXPECT_EQ(limit, counter);
+ }
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/mutex-unittest.cc b/deps/v8/test/unittests/base/platform/mutex-unittest.cc
new file mode 100644
index 0000000000..5af5efb5a9
--- /dev/null
+++ b/deps/v8/test/unittests/base/platform/mutex-unittest.cc
@@ -0,0 +1,91 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/mutex.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(Mutex, LockGuardMutex) {
+ Mutex mutex;
+ { LockGuard<Mutex> lock_guard(&mutex); }
+ { LockGuard<Mutex> lock_guard(&mutex); }
+}
+
+
+TEST(Mutex, LockGuardRecursiveMutex) {
+ RecursiveMutex recursive_mutex;
+ { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex); }
+ {
+ LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
+ LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
+ }
+}
+
+
+TEST(Mutex, LockGuardLazyMutex) {
+ LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
+ { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+ { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+}
+
+
+TEST(Mutex, LockGuardLazyRecursiveMutex) {
+ LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+ { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer()); }
+ {
+ LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
+ LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
+ }
+}
+
+
+TEST(Mutex, MultipleMutexes) {
+ Mutex mutex1;
+ Mutex mutex2;
+ Mutex mutex3;
+ // Order 1
+ mutex1.Lock();
+ mutex2.Lock();
+ mutex3.Lock();
+ mutex1.Unlock();
+ mutex2.Unlock();
+ mutex3.Unlock();
+ // Order 2
+ mutex1.Lock();
+ mutex2.Lock();
+ mutex3.Lock();
+ mutex3.Unlock();
+ mutex2.Unlock();
+ mutex1.Unlock();
+}
+
+
+TEST(Mutex, MultipleRecursiveMutexes) {
+ RecursiveMutex recursive_mutex1;
+ RecursiveMutex recursive_mutex2;
+ // Order 1
+ recursive_mutex1.Lock();
+ recursive_mutex2.Lock();
+ EXPECT_TRUE(recursive_mutex1.TryLock());
+ EXPECT_TRUE(recursive_mutex2.TryLock());
+ recursive_mutex1.Unlock();
+ recursive_mutex1.Unlock();
+ recursive_mutex2.Unlock();
+ recursive_mutex2.Unlock();
+ // Order 2
+ recursive_mutex1.Lock();
+ EXPECT_TRUE(recursive_mutex1.TryLock());
+ recursive_mutex2.Lock();
+ EXPECT_TRUE(recursive_mutex2.TryLock());
+ recursive_mutex2.Unlock();
+ recursive_mutex1.Unlock();
+ recursive_mutex2.Unlock();
+ recursive_mutex1.Unlock();
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
new file mode 100644
index 0000000000..06fbee0042
--- /dev/null
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/platform.h"
+
+#if V8_OS_POSIX
+#include <unistd.h> // NOLINT
+#endif
+
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(OS, GetCurrentProcessId) {
+#if V8_OS_POSIX
+ EXPECT_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
+#endif
+
+#if V8_OS_WIN
+ EXPECT_EQ(static_cast<int>(::GetCurrentProcessId()),
+ OS::GetCurrentProcessId());
+#endif
+}
+
+
+namespace {
+
+class SelfJoinThread FINAL : public Thread {
+ public:
+ SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
+ virtual void Run() OVERRIDE { Join(); }
+};
+
+} // namespace
+
+
+TEST(Thread, SelfJoin) {
+ SelfJoinThread thread;
+ thread.Start();
+ thread.Join();
+}
+
+
+namespace {
+
+class ThreadLocalStorageTest : public Thread, public ::testing::Test {
+ public:
+ ThreadLocalStorageTest() : Thread(Options("ThreadLocalStorageTest")) {
+ for (size_t i = 0; i < arraysize(keys_); ++i) {
+ keys_[i] = Thread::CreateThreadLocalKey();
+ }
+ }
+ ~ThreadLocalStorageTest() {
+ for (size_t i = 0; i < arraysize(keys_); ++i) {
+ Thread::DeleteThreadLocalKey(keys_[i]);
+ }
+ }
+
+ virtual void Run() FINAL OVERRIDE {
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ CHECK(!Thread::HasThreadLocal(keys_[i]));
+ }
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ Thread::SetThreadLocal(keys_[i], GetValue(i));
+ }
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ CHECK(Thread::HasThreadLocal(keys_[i]));
+ }
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ CHECK_EQ(GetValue(i), Thread::GetThreadLocal(keys_[i]));
+ CHECK_EQ(GetValue(i), Thread::GetExistingThreadLocal(keys_[i]));
+ }
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ Thread::SetThreadLocal(keys_[i], GetValue(arraysize(keys_) - i - 1));
+ }
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ CHECK(Thread::HasThreadLocal(keys_[i]));
+ }
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
+ Thread::GetThreadLocal(keys_[i]));
+ CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
+ Thread::GetExistingThreadLocal(keys_[i]));
+ }
+ }
+
+ private:
+ static void* GetValue(size_t x) {
+ return reinterpret_cast<void*>(static_cast<uintptr_t>(x + 1));
+ }
+
+ Thread::LocalStorageKey keys_[256];
+};
+
+} // namespace
+
+
+TEST_F(ThreadLocalStorageTest, DoTest) {
+ Run();
+ Start();
+ Join();
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/semaphore-unittest.cc b/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
new file mode 100644
index 0000000000..c68435f875
--- /dev/null
+++ b/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+static const char kAlphabet[] = "XKOAD";
+static const size_t kAlphabetSize = sizeof(kAlphabet) - 1;
+static const size_t kBufferSize = 987; // GCD(buffer size, alphabet size) = 1
+static const size_t kDataSize = kBufferSize * kAlphabetSize * 10;
+
+
+class ProducerThread FINAL : public Thread {
+ public:
+ ProducerThread(char* buffer, Semaphore* free_space, Semaphore* used_space)
+ : Thread(Options("ProducerThread")),
+ buffer_(buffer),
+ free_space_(free_space),
+ used_space_(used_space) {}
+ virtual ~ProducerThread() {}
+
+ virtual void Run() OVERRIDE {
+ for (size_t n = 0; n < kDataSize; ++n) {
+ free_space_->Wait();
+ buffer_[n % kBufferSize] = kAlphabet[n % kAlphabetSize];
+ used_space_->Signal();
+ }
+ }
+
+ private:
+ char* buffer_;
+ Semaphore* const free_space_;
+ Semaphore* const used_space_;
+};
+
+
+class ConsumerThread FINAL : public Thread {
+ public:
+ ConsumerThread(const char* buffer, Semaphore* free_space,
+ Semaphore* used_space)
+ : Thread(Options("ConsumerThread")),
+ buffer_(buffer),
+ free_space_(free_space),
+ used_space_(used_space) {}
+ virtual ~ConsumerThread() {}
+
+ virtual void Run() OVERRIDE {
+ for (size_t n = 0; n < kDataSize; ++n) {
+ used_space_->Wait();
+ EXPECT_EQ(kAlphabet[n % kAlphabetSize], buffer_[n % kBufferSize]);
+ free_space_->Signal();
+ }
+ }
+
+ private:
+ const char* buffer_;
+ Semaphore* const free_space_;
+ Semaphore* const used_space_;
+};
+
+
+class WaitAndSignalThread FINAL : public Thread {
+ public:
+ explicit WaitAndSignalThread(Semaphore* semaphore)
+ : Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
+ virtual ~WaitAndSignalThread() {}
+
+ virtual void Run() OVERRIDE {
+ for (int n = 0; n < 100; ++n) {
+ semaphore_->Wait();
+ ASSERT_FALSE(semaphore_->WaitFor(TimeDelta::FromMicroseconds(1)));
+ semaphore_->Signal();
+ }
+ }
+
+ private:
+ Semaphore* const semaphore_;
+};
+
+} // namespace
+
+
+TEST(Semaphore, ProducerConsumer) {
+ char buffer[kBufferSize];
+ std::memset(buffer, 0, sizeof(buffer));
+ Semaphore free_space(kBufferSize);
+ Semaphore used_space(0);
+ ProducerThread producer_thread(buffer, &free_space, &used_space);
+ ConsumerThread consumer_thread(buffer, &free_space, &used_space);
+ producer_thread.Start();
+ consumer_thread.Start();
+ producer_thread.Join();
+ consumer_thread.Join();
+}
+
+
+TEST(Semaphore, WaitAndSignal) {
+ Semaphore semaphore(0);
+ WaitAndSignalThread t1(&semaphore);
+ WaitAndSignalThread t2(&semaphore);
+
+ t1.Start();
+ t2.Start();
+
+ // Make something available.
+ semaphore.Signal();
+
+ t1.Join();
+ t2.Join();
+
+ semaphore.Wait();
+
+ EXPECT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1)));
+}
+
+
+TEST(Semaphore, WaitFor) {
+ Semaphore semaphore(0);
+
+ // Semaphore not signalled - timeout.
+ ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+ ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+ ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+
+ // Semaphore signalled - no timeout.
+ semaphore.Signal();
+ ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+ semaphore.Signal();
+ ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+ semaphore.Signal();
+ ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
new file mode 100644
index 0000000000..b3bfbab319
--- /dev/null
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -0,0 +1,186 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/time.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+#include "src/base/platform/elapsed-timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(TimeDelta, FromAndIn) {
+ EXPECT_EQ(TimeDelta::FromDays(2), TimeDelta::FromHours(48));
+ EXPECT_EQ(TimeDelta::FromHours(3), TimeDelta::FromMinutes(180));
+ EXPECT_EQ(TimeDelta::FromMinutes(2), TimeDelta::FromSeconds(120));
+ EXPECT_EQ(TimeDelta::FromSeconds(2), TimeDelta::FromMilliseconds(2000));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2), TimeDelta::FromMicroseconds(2000));
+ EXPECT_EQ(static_cast<int>(13), TimeDelta::FromDays(13).InDays());
+ EXPECT_EQ(static_cast<int>(13), TimeDelta::FromHours(13).InHours());
+ EXPECT_EQ(static_cast<int>(13), TimeDelta::FromMinutes(13).InMinutes());
+ EXPECT_EQ(static_cast<int64_t>(13), TimeDelta::FromSeconds(13).InSeconds());
+ EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
+ EXPECT_EQ(static_cast<int64_t>(13),
+ TimeDelta::FromMilliseconds(13).InMilliseconds());
+ EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
+ EXPECT_EQ(static_cast<int64_t>(13),
+ TimeDelta::FromMicroseconds(13).InMicroseconds());
+}
+
+
+#if V8_OS_MACOSX
+TEST(TimeDelta, MachTimespec) {
+ TimeDelta null = TimeDelta();
+ EXPECT_EQ(null, TimeDelta::FromMachTimespec(null.ToMachTimespec()));
+ TimeDelta delta1 = TimeDelta::FromMilliseconds(42);
+ EXPECT_EQ(delta1, TimeDelta::FromMachTimespec(delta1.ToMachTimespec()));
+ TimeDelta delta2 = TimeDelta::FromDays(42);
+ EXPECT_EQ(delta2, TimeDelta::FromMachTimespec(delta2.ToMachTimespec()));
+}
+#endif
+
+
+TEST(Time, JsTime) {
+ Time t = Time::FromJsTime(700000.3);
+ EXPECT_DOUBLE_EQ(700000.3, t.ToJsTime());
+}
+
+
+#if V8_OS_POSIX
+TEST(Time, Timespec) {
+ Time null;
+ EXPECT_TRUE(null.IsNull());
+ EXPECT_EQ(null, Time::FromTimespec(null.ToTimespec()));
+ Time now = Time::Now();
+ EXPECT_EQ(now, Time::FromTimespec(now.ToTimespec()));
+ Time now_sys = Time::NowFromSystemTime();
+ EXPECT_EQ(now_sys, Time::FromTimespec(now_sys.ToTimespec()));
+ Time unix_epoch = Time::UnixEpoch();
+ EXPECT_EQ(unix_epoch, Time::FromTimespec(unix_epoch.ToTimespec()));
+ Time max = Time::Max();
+ EXPECT_TRUE(max.IsMax());
+ EXPECT_EQ(max, Time::FromTimespec(max.ToTimespec()));
+}
+
+
+TEST(Time, Timeval) {
+ Time null;
+ EXPECT_TRUE(null.IsNull());
+ EXPECT_EQ(null, Time::FromTimeval(null.ToTimeval()));
+ Time now = Time::Now();
+ EXPECT_EQ(now, Time::FromTimeval(now.ToTimeval()));
+ Time now_sys = Time::NowFromSystemTime();
+ EXPECT_EQ(now_sys, Time::FromTimeval(now_sys.ToTimeval()));
+ Time unix_epoch = Time::UnixEpoch();
+ EXPECT_EQ(unix_epoch, Time::FromTimeval(unix_epoch.ToTimeval()));
+ Time max = Time::Max();
+ EXPECT_TRUE(max.IsMax());
+ EXPECT_EQ(max, Time::FromTimeval(max.ToTimeval()));
+}
+#endif
+
+
+#if V8_OS_WIN
+TEST(Time, Filetime) {
+ Time null;
+ EXPECT_TRUE(null.IsNull());
+ EXPECT_EQ(null, Time::FromFiletime(null.ToFiletime()));
+ Time now = Time::Now();
+ EXPECT_EQ(now, Time::FromFiletime(now.ToFiletime()));
+ Time now_sys = Time::NowFromSystemTime();
+ EXPECT_EQ(now_sys, Time::FromFiletime(now_sys.ToFiletime()));
+ Time unix_epoch = Time::UnixEpoch();
+ EXPECT_EQ(unix_epoch, Time::FromFiletime(unix_epoch.ToFiletime()));
+ Time max = Time::Max();
+ EXPECT_TRUE(max.IsMax());
+ EXPECT_EQ(max, Time::FromFiletime(max.ToFiletime()));
+}
+#endif
+
+
+namespace {
+
+template <typename T>
+static void ResolutionTest(T (*Now)(), TimeDelta target_granularity) {
+ // We're trying to measure that intervals increment in a VERY small amount
+ // of time -- according to the specified target granularity. Unfortunately,
+ // if we happen to have a context switch in the middle of our test, the
+ // context switch could easily exceed our limit. So, we iterate on this
+ // several times. As long as we're able to detect the fine-granularity
+ // timers at least once, then the test has succeeded.
+ static const TimeDelta kExpirationTimeout = TimeDelta::FromSeconds(1);
+ ElapsedTimer timer;
+ timer.Start();
+ TimeDelta delta;
+ do {
+ T start = Now();
+ T now = start;
+ // Loop until we can detect that the clock has changed. Non-HighRes timers
+ // will increment in chunks, i.e. 15ms. By spinning until we see a clock
+ // change, we detect the minimum time between measurements.
+ do {
+ now = Now();
+ delta = now - start;
+ } while (now <= start);
+ EXPECT_NE(static_cast<int64_t>(0), delta.InMicroseconds());
+ } while (delta > target_granularity && !timer.HasExpired(kExpirationTimeout));
+ EXPECT_LE(delta, target_granularity);
+}
+
+} // namespace
+
+
+TEST(Time, NowResolution) {
+ // We assume that Time::Now() has at least 16ms resolution.
+ static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+ ResolutionTest<Time>(&Time::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, NowResolution) {
+ // We assume that TimeTicks::Now() has at least 16ms resolution.
+ static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+ ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, HighResolutionNowResolution) {
+ if (!TimeTicks::IsHighResolutionClockWorking()) return;
+
+ // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
+ static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
+ ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, IsMonotonic) {
+ TimeTicks previous_normal_ticks;
+ TimeTicks previous_highres_ticks;
+ ElapsedTimer timer;
+ timer.Start();
+ while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
+ TimeTicks normal_ticks = TimeTicks::Now();
+ TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
+ EXPECT_GE(normal_ticks, previous_normal_ticks);
+ EXPECT_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
+ EXPECT_GE(highres_ticks, previous_highres_ticks);
+ EXPECT_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
+ previous_normal_ticks = normal_ticks;
+ previous_highres_ticks = highres_ticks;
+ }
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/sys-info-unittest.cc b/deps/v8/test/unittests/base/sys-info-unittest.cc
new file mode 100644
index 0000000000..a760f941f6
--- /dev/null
+++ b/deps/v8/test/unittests/base/sys-info-unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_NACL
+#define DISABLE_ON_NACL(Name) DISABLED_##Name
+#else
+#define DISABLE_ON_NACL(Name) Name
+#endif
+
+namespace v8 {
+namespace base {
+
+TEST(SysInfoTest, NumberOfProcessors) {
+ EXPECT_LT(0, SysInfo::NumberOfProcessors());
+}
+
+
+TEST(SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory)) {
+ EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
+}
+
+
+TEST(SysInfoTest, AmountOfVirtualMemory) {
+ EXPECT_LE(0, SysInfo::AmountOfVirtualMemory());
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
new file mode 100644
index 0000000000..7c533db4f0
--- /dev/null
+++ b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <climits>
+
+#include "src/base/utils/random-number-generator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+class RandomNumberGeneratorTest : public ::testing::TestWithParam<int> {};
+
+
+static const int kMaxRuns = 12345;
+
+
+TEST_P(RandomNumberGeneratorTest, NextIntWithMaxValue) {
+ RandomNumberGenerator rng(GetParam());
+ for (int max = 1; max <= kMaxRuns; ++max) {
+ int n = rng.NextInt(max);
+ EXPECT_LE(0, n);
+ EXPECT_LT(n, max);
+ }
+}
+
+
+TEST_P(RandomNumberGeneratorTest, NextBooleanReturnsFalseOrTrue) {
+ RandomNumberGenerator rng(GetParam());
+ for (int k = 0; k < kMaxRuns; ++k) {
+ bool b = rng.NextBool();
+ EXPECT_TRUE(b == false || b == true);
+ }
+}
+
+
+TEST_P(RandomNumberGeneratorTest, NextDoubleReturnsValueBetween0And1) {
+ RandomNumberGenerator rng(GetParam());
+ for (int k = 0; k < kMaxRuns; ++k) {
+ double d = rng.NextDouble();
+ EXPECT_LE(0.0, d);
+ EXPECT_LT(d, 1.0);
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(RandomSeeds, RandomNumberGeneratorTest,
+ ::testing::Values(INT_MIN, -1, 0, 1, 42, 100,
+ 1234567890, 987654321, INT_MAX));
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/char-predicates-unittest.cc b/deps/v8/test/unittests/char-predicates-unittest.cc
new file mode 100644
index 0000000000..d1ba2c5da8
--- /dev/null
+++ b/deps/v8/test/unittests/char-predicates-unittest.cc
@@ -0,0 +1,121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/char-predicates.h"
+#include "src/unicode.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(CharPredicatesTest, WhiteSpace) {
+ // As of Unicode 6.3.0, \u180E is no longer a white space. We still consider
+ // it to be one though, since JS recognizes all white spaces in Unicode 5.1.
+ EXPECT_TRUE(WhiteSpace::Is(0x0009));
+ EXPECT_TRUE(WhiteSpace::Is(0x000B));
+ EXPECT_TRUE(WhiteSpace::Is(0x000C));
+ EXPECT_TRUE(WhiteSpace::Is(' '));
+ EXPECT_TRUE(WhiteSpace::Is(0x00A0));
+ EXPECT_TRUE(WhiteSpace::Is(0x180E));
+ EXPECT_TRUE(WhiteSpace::Is(0xFEFF));
+}
+
+
+TEST(CharPredicatesTest, WhiteSpaceOrLineTerminator) {
+ // As of Unicode 6.3.0, \u180E is no longer a white space. We still consider
+ // it to be one though, since JS recognizes all white spaces in Unicode 5.1.
+ // White spaces
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x0009));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000B));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000C));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(' '));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x00A0));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x180E));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0xFEFF));
+ // Line terminators
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000A));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000D));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x2028));
+ EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x2029));
+}
+
+
+TEST(CharPredicatesTest, IdentifierStart) {
+ EXPECT_TRUE(IdentifierStart::Is('$'));
+ EXPECT_TRUE(IdentifierStart::Is('_'));
+ EXPECT_TRUE(IdentifierStart::Is('\\'));
+
+ // http://www.unicode.org/reports/tr31/
+ // Other_ID_Start
+ EXPECT_TRUE(IdentifierStart::Is(0x2118));
+ EXPECT_TRUE(IdentifierStart::Is(0x212E));
+ EXPECT_TRUE(IdentifierStart::Is(0x309B));
+ EXPECT_TRUE(IdentifierStart::Is(0x309C));
+
+ // Issue 2892:
+ // \u2E2F has the Pattern_Syntax property, excluding it from ID_Start.
+ EXPECT_FALSE(unibrow::ID_Start::Is(0x2E2F));
+}
+
+
+TEST(CharPredicatesTest, IdentifierPart) {
+ EXPECT_TRUE(IdentifierPart::Is('$'));
+ EXPECT_TRUE(IdentifierPart::Is('_'));
+ EXPECT_TRUE(IdentifierPart::Is('\\'));
+ EXPECT_TRUE(IdentifierPart::Is(0x200C));
+ EXPECT_TRUE(IdentifierPart::Is(0x200D));
+
+ // http://www.unicode.org/reports/tr31/
+ // Other_ID_Start
+ EXPECT_TRUE(IdentifierPart::Is(0x2118));
+ EXPECT_TRUE(IdentifierPart::Is(0x212E));
+ EXPECT_TRUE(IdentifierPart::Is(0x309B));
+ EXPECT_TRUE(IdentifierPart::Is(0x309C));
+
+ // Other_ID_Continue
+ EXPECT_TRUE(IdentifierPart::Is(0x00B7));
+ EXPECT_TRUE(IdentifierPart::Is(0x0387));
+ EXPECT_TRUE(IdentifierPart::Is(0x1369));
+ EXPECT_TRUE(IdentifierPart::Is(0x1370));
+ EXPECT_TRUE(IdentifierPart::Is(0x1371));
+ EXPECT_TRUE(IdentifierPart::Is(0x19DA));
+
+ // Issue 2892:
+ // \u2E2F has the Pattern_Syntax property, excluding it from ID_Start.
+ EXPECT_FALSE(IdentifierPart::Is(0x2E2F));
+}
+
+
+#ifdef V8_I18N_SUPPORT
+TEST(CharPredicatesTest, SupplementaryPlaneIdentifiers) {
+ // Both ID_Start and ID_Continue.
+ EXPECT_TRUE(IdentifierStart::Is(0x10403)); // Category Lu
+ EXPECT_TRUE(IdentifierPart::Is(0x10403));
+ EXPECT_TRUE(IdentifierStart::Is(0x1043C)); // Category Ll
+ EXPECT_TRUE(IdentifierPart::Is(0x1043C));
+ EXPECT_TRUE(IdentifierStart::Is(0x16F9C)); // Category Lm
+ EXPECT_TRUE(IdentifierPart::Is(0x16F9C));
+ EXPECT_TRUE(IdentifierStart::Is(0x10048)); // Category Lo
+ EXPECT_TRUE(IdentifierPart::Is(0x10048));
+ EXPECT_TRUE(IdentifierStart::Is(0x1014D)); // Category Nl
+ EXPECT_TRUE(IdentifierPart::Is(0x1014D));
+
+ // Only ID_Continue.
+ EXPECT_FALSE(IdentifierStart::Is(0x101FD)); // Category Mn
+ EXPECT_TRUE(IdentifierPart::Is(0x101FD));
+ EXPECT_FALSE(IdentifierStart::Is(0x11002)); // Category Mc
+ EXPECT_TRUE(IdentifierPart::Is(0x11002));
+ EXPECT_FALSE(IdentifierStart::Is(0x104A9)); // Category Nd
+ EXPECT_TRUE(IdentifierPart::Is(0x104A9));
+
+ // Neither.
+ EXPECT_FALSE(IdentifierStart::Is(0x10111)); // Category No
+ EXPECT_FALSE(IdentifierPart::Is(0x10111));
+ EXPECT_FALSE(IdentifierStart::Is(0x1F4A9)); // Category So
+ EXPECT_FALSE(IdentifierPart::Is(0x1F4A9));
+}
+#endif // V8_I18N_SUPPORT
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
new file mode 100644
index 0000000000..6e4306de05
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -0,0 +1,2127 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
+
+
+// Data processing instructions.
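+// Each entry pairs a machine operator with the ARM opcodes the selector is
+// expected to use: arch_opcode for the plain form, reverse_arch_opcode when
+// the operands are swapped (e.g. RSB instead of SUB), and test_arch_opcode
+// for the flag-setting variant emitted when only the condition flags are
+// needed (e.g. TST, TEQ, CMN, CMP).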
+struct DPI {
+ Constructor constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ ArchOpcode reverse_arch_opcode;
+ ArchOpcode test_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const DPI& dpi) {
+ return os << dpi.constructor_name;
+}
+
+
+static const DPI kDPIs[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmAdd, kArmCmn},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
+
+
+// Data processing instructions with overflow.
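+// Unlike plain DPIs there is no separate test opcode: the operation itself
+// sets the flags, and the tests check the kOverflow / kNotOverflow condition.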
+struct ODPI {
+ Constructor constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ ArchOpcode reverse_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const ODPI& odpi) {
+ return os << odpi.constructor_name;
+}
+
+
+static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
+ "Int32AddWithOverflow", kArmAdd, kArmAdd},
+ {&RawMachineAssembler::Int32SubWithOverflow,
+ "Int32SubWithOverflow", kArmSub, kArmRsb}};
+
+
+// Shifts.
+struct Shift {
+ Constructor constructor;
+ const char* constructor_name;
+ int32_t i_low; // lowest possible immediate
+ int32_t i_high; // highest possible immediate
+ AddressingMode i_mode; // Operand2_R_<shift>_I
+ AddressingMode r_mode; // Operand2_R_<shift>_R
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+ return os << shift.constructor_name;
+}
+
+
+static const Shift kShifts[] = {
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
+ kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
+ kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
+ kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
+ kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
+
+
+// Immediates (random subset of the values encodable as an ARM Operand2
+// immediate, i.e. an 8-bit value rotated right by an even amount).
+static const int32_t kImmediates[] = {
+ std::numeric_limits<int32_t>::min(), -2147483617, -2147483606, -2113929216,
+ -2080374784, -1996488704, -1879048192, -1459617792,
+ -1358954496, -1342177265, -1275068414, -1073741818,
+ -1073741777, -855638016, -805306368, -402653184,
+ -268435444, -16777216, 0, 35,
+ 61, 105, 116, 171,
+ 245, 255, 692, 1216,
+ 1248, 1520, 1600, 1888,
+ 3744, 4080, 5888, 8384,
+ 9344, 9472, 9792, 13312,
+ 15040, 15360, 20736, 22272,
+ 23296, 32000, 33536, 37120,
+ 45824, 47872, 56320, 59392,
+ 65280, 72704, 101376, 147456,
+ 161792, 164864, 167936, 173056,
+ 195584, 209920, 212992, 356352,
+ 655360, 704512, 716800, 851968,
+ 901120, 1044480, 1523712, 2572288,
+ 3211264, 3588096, 3833856, 3866624,
+ 4325376, 5177344, 6488064, 7012352,
+ 7471104, 14090240, 16711680, 19398656,
+ 22282240, 28573696, 30408704, 30670848,
+ 43253760, 54525952, 55312384, 56623104,
+ 68157440, 115343360, 131072000, 187695104,
+ 188743680, 195035136, 197132288, 203423744,
+ 218103808, 267386880, 268435470, 285212672,
+ 402653185, 415236096, 595591168, 603979776,
+ 603979778, 629145600, 1073741835, 1073741855,
+ 1073741861, 1073741884, 1157627904, 1476395008,
+ 1476395010, 1610612741, 2030043136, 2080374785,
+ 2097152000};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions.
+
+
+typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
+
+
+TEST_P(InstructionSelectorDPITest, Parameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorDPITest, Immediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
+ &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
+ &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(2)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0),
+ (m.*shift.constructor)(
+ m.Parameter(1), m.Int32Constant(imm))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(5U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(5U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(
+ m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32Equal(
+ (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32Equal(
+ (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32NotEqual(
+ (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32NotEqual(
+ (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
+ ::testing::ValuesIn(kDPIs));
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions with overflow.
+
+
+typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(m.Parameter(0),
+ (m.*shift.constructor)(
+ m.Parameter(1), m.Int32Constant(imm)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(m.Parameter(0),
+ (m.*shift.constructor)(
+ m.Parameter(1), m.Int32Constant(imm)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(
+ m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* n = (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Projection(1, n), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&b);
+ m.Return(m.Projection(0, n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+ m.Branch(m.Projection(1, n), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&b);
+ m.Return(m.Projection(0, n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+ m.Branch(m.Projection(1, n), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&b);
+ m.Return(m.Projection(0, n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Projection(0, n));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Projection(0, n));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
+ ::testing::ValuesIn(kODPIs));
+
+
+// -----------------------------------------------------------------------------
+// Shifts.
+
+
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Parameters) {
+ const Shift shift = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+ const Shift shift = GetParam();
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
+ const Shift shift = GetParam();
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Word32Equal(m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
+ const Shift shift = GetParam();
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
+ const Shift shift = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Word32Equal(m.Int32Constant(0),
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
+ const Shift shift = GetParam();
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(
+ m.Int32Constant(0),
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
+ const Shift shift = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
+ const Shift shift = GetParam();
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Not(
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
+ const Shift shift = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
+ m.Parameter(1), m.Parameter(2)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
+ const Shift shift = GetParam();
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Parameter(0),
+ m.Word32Not((m.*shift.constructor)(
+ m.Parameter(1), m.Int32Constant(imm)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShifts));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
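+// Describes a load/store pair for one machine type: the expected load and
+// store opcodes, a predicate for the register class of the loaded value, and
+// a set of offsets the selector is expected to encode as immediate operands.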
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode ldr_opcode;
+ ArchOpcode str_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+ return os << memacc.type;
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8,
+ kArmLdrsb,
+ kArmStrb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachUint8,
+ kArmLdrb,
+ kArmStrb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
+ -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
+ 39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
+ {kMachInt16,
+ kArmLdrsh,
+ kArmStrh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
+ -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
+ 102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
+ {kMachUint16,
+ kArmLdrh,
+ kArmStrh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
+ -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
+ 114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
+ {kMachInt32,
+ kArmLdr,
+ kArmStr,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
+ -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
+ 93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
+ {kMachFloat32,
+ kArmVldrF32,
+ kArmVstrF32,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
+ -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
+ 24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
+ {kMachFloat64,
+ kArmVldrF64,
+ kArmVstrF64,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
+ -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
+ 108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcvtF64F32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcvtF32F64, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Comparisons.
+
+
+namespace {
+
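+// negated_flags_condition is the condition expected when the result of the
+// comparison is itself compared against zero (see Word32EqualWithZero below).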
+struct Comparison {
+ Constructor constructor;
+ const char* constructor_name;
+ FlagsCondition flags_condition;
+ FlagsCondition negated_flags_condition;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Comparison& cmp) {
+ return os << cmp.constructor_name;
+}
+
+
+const Comparison kComparisons[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kSignedLessThan,
+ kSignedGreaterThanOrEqual},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kSignedLessThanOrEqual, kSignedGreaterThan},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kUnsignedLessThan,
+ kUnsignedGreaterThanOrEqual},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kUnsignedLessThanOrEqual, kUnsignedGreaterThan}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<Comparison>
+ InstructionSelectorComparisonTest;
+
+
+TEST_P(InstructionSelectorComparisonTest, Parameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const r = (m.*cmp.constructor)(p0, p1);
+ m.Return(r);
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->OutputAt(0)));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorComparisonTest, Word32EqualWithZero) {
+ {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const r =
+ m.Word32Equal((m.*cmp.constructor)(p0, p1), m.Int32Constant(0));
+ m.Return(r);
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->OutputAt(0)));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.negated_flags_condition, s[0]->flags_condition());
+ }
+ {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const r =
+ m.Word32Equal(m.Int32Constant(0), (m.*cmp.constructor)(p0, p1));
+ m.Return(r);
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->OutputAt(0)));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.negated_flags_condition, s[0]->flags_condition());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorComparisonTest,
+ ::testing::ValuesIn(kComparisons));
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
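+// An Int32Add of an Int32Mul is expected to be combined into a single
+// multiply-accumulate (MLA), regardless of operand order.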
+TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Int32Add(p0, m.Int32Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Int32Add(m.Int32Mul(p1, p2), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithInt32MulHigh) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Int32Add(p0, m.Int32MulHigh(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmSmmla, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Int32Add(m.Int32MulHigh(p1, p2), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmSmmla, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmSub, s[1]->arch_opcode());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
+}
+
+
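+// With the MLS feature enabled, the mul/sub pair above is instead expected
+// to be combined into a single multiply-subtract (MLS).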
+TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+ Stream s = m.Build(MLS);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMls, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(3U, s[0]->InputCount());
+}
+
+
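+// Without SUDIV support, Int32Div is expected to be lowered to VFP code:
+// convert both operands to float64, divide, and convert the quotient back.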
+TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+ EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s[3]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(SUDIV);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
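+  // Without SUDIV, Int32Mod is computed as lhs - (lhs / rhs) * rhs, using the
+  // same double-precision division sequence as above.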
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(6U, s.size());
+ EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+ EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s[3]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+ EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+ ASSERT_EQ(1U, s[4]->OutputCount());
+ ASSERT_EQ(2U, s[4]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+ EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+ ASSERT_EQ(1U, s[5]->OutputCount());
+ ASSERT_EQ(2U, s[5]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(SUDIV);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(MLS, SUDIV);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
+  // x * (2^k + 1) -> x + (x << k)
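+  // For example, with k == 3: x * 9 == x + (x << 3), selected as a single ADD
+  // with an LSL #3 shifted operand.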
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+  // x * (2^k - 1) -> -x + (x << k)
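+  // For example, with k == 3: x * 7 == (x << 3) - x, selected as a single RSB
+  // with an LSL #3 shifted operand.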
+ TRACED_FORRANGE(int32_t, k, 3, 30) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+  // (2^k + 1) * x -> x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+  // (2^k - 1) * x -> -x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 3, 30) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Int32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmSmmul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32MulHighWithParameters) {
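+  // UMULL produces the full 64-bit product in a register pair; the second
+  // output (the high word) is the Uint32MulHigh result.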
+ StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Uint32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUmull, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->OutputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32DivWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+ EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s[3]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32DivWithParametersForSUDIV) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(SUDIV);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32ModWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(6U, s.size());
+ EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+ EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s[3]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+ EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+ ASSERT_EQ(1U, s[4]->OutputCount());
+ ASSERT_EQ(2U, s[4]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+ EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+ ASSERT_EQ(1U, s[5]->OutputCount());
+ ASSERT_EQ(2U, s[5]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIV) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(SUDIV);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(MLS, SUDIV);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
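+  // 0xffffffffu >> (32 - width) sets the low |width| bits, so the AND is
+  // equivalent to an unsigned bitfield extract (ubfx) starting at bit 0.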
+ TRACED_FORRANGE(int32_t, width, 1, 32) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Parameter(0),
+ m.Int32Constant(0xffffffffu >> (32 - width))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, width, 1, 32) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+ m.Parameter(0)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
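+  // ~((0xffffffffu >> (32 - width)) << lsb) clears |width| bits starting at
+  // bit |lsb|, which matches the bit field clear (bfc) pattern.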
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(
+ m.Parameter(0),
+ m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(
+ m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
+ m.Parameter(0)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t max = 1 << lsb;
+ if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+ uint32_t jnk = rng()->NextInt(max);
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
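+      // The |jnk| bits lie below |lsb| and are shifted out by the Word32Shr,
+      // so they must not prevent selection of ubfx.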
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t max = 1 << lsb;
+ if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+ uint32_t jnk = rng()->NextInt(max);
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Not(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+ m.Int32Constant(0xffffffffu >> (32 - width))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
new file mode 100644
index 0000000000..96b8a83eb6
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -0,0 +1,2029 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+ return os << mi.constructor_name;
+}
+
+
+struct Shift {
+ MachInst2 mi;
+ AddressingMode mode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+ return os << shift.mi;
+}
+
+
+// Helper to build Int32Constant or Int64Constant depending on the given
+// machine type.
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
+ int64_t value) {
+ switch (type) {
+ case kMachInt32:
+      return m.Int32Constant(value);
+
+ case kMachInt64:
+      return m.Int64Constant(value);
+
+ default:
+ UNIMPLEMENTED();
+ }
+ return NULL;
+}
+
+
+// ARM64 logical instructions.
+static const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32, kMachInt32},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eor, kMachInt64}};
+
+
+// ARM64 logical immediates: a contiguous run of set bits, rotated within a
+// power-of-two-sized block that is then replicated across the word. Below is
+// a random subset of the 32-bit immediates.
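+// For example, 0x01f801f8 is a run of 6 set bits within a 16-bit block,
+// replicated twice to fill the 32-bit word.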
+static const uint32_t kLogical32Immediates[] = {
+ 0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
+ 0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
+ 0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
+ 0x001ffffc, 0x003f0000, 0x003f8000, 0x00780000, 0x007fc000, 0x00ff0000,
+ 0x01800000, 0x01800180, 0x01f801f8, 0x03fe0000, 0x03ffffc0, 0x03fffffc,
+ 0x06000000, 0x07fc0000, 0x07ffc000, 0x07ffffc0, 0x07ffffe0, 0x0ffe0ffe,
+ 0x0ffff800, 0x0ffffff0, 0x0fffffff, 0x18001800, 0x1f001f00, 0x1f801f80,
+ 0x30303030, 0x3ff03ff0, 0x3ff83ff8, 0x3fff0000, 0x3fff8000, 0x3fffffc0,
+ 0x70007000, 0x7f7f7f7f, 0x7fc00000, 0x7fffffc0, 0x8000001f, 0x800001ff,
+ 0x81818181, 0x9fff9fff, 0xc00007ff, 0xc0ffffff, 0xdddddddd, 0xe00001ff,
+ 0xe00003ff, 0xe007ffff, 0xefffefff, 0xf000003f, 0xf001f001, 0xf3fff3ff,
+ 0xf800001f, 0xf80fffff, 0xf87ff87f, 0xfbfbfbfb, 0xfc00001f, 0xfc0000ff,
+ 0xfc0001ff, 0xfc03fc03, 0xfe0001ff, 0xff000001, 0xff03ff03, 0xff800000,
+ 0xff800fff, 0xff801fff, 0xff87ffff, 0xffc0003f, 0xffc007ff, 0xffcfffcf,
+ 0xffe00003, 0xffe1ffff, 0xfff0001f, 0xfff07fff, 0xfff80007, 0xfff87fff,
+ 0xfffc00ff, 0xfffe07ff, 0xffff00ff, 0xffffc001, 0xfffff007, 0xfffff3ff,
+ 0xfffff807, 0xfffff9ff, 0xfffffc0f, 0xfffffeff};
+
+
+// Random subset of 64-bit logical immediates.
+static const uint64_t kLogical64Immediates[] = {
+ 0x0000000000000001, 0x0000000000000002, 0x0000000000000003,
+ 0x0000000000000070, 0x0000000000000080, 0x0000000000000100,
+ 0x00000000000001c0, 0x0000000000000300, 0x0000000000000600,
+ 0x00000000000007e0, 0x0000000000003ffc, 0x0000000000007fc0,
+ 0x0000000600000000, 0x0000003ffffffffc, 0x000000f000000000,
+ 0x000001f800000000, 0x0003fc0000000000, 0x0003fc000003fc00,
+ 0x0003ffffffc00000, 0x0003ffffffffffc0, 0x0006000000060000,
+ 0x003ffffffffc0000, 0x0180018001800180, 0x01f801f801f801f8,
+ 0x0600000000000000, 0x1000000010000000, 0x1000100010001000,
+ 0x1010101010101010, 0x1111111111111111, 0x1f001f001f001f00,
+ 0x1f1f1f1f1f1f1f1f, 0x1ffffffffffffffe, 0x3ffc3ffc3ffc3ffc,
+ 0x5555555555555555, 0x7f7f7f7f7f7f7f7f, 0x8000000000000000,
+ 0x8000001f8000001f, 0x8181818181818181, 0x9999999999999999,
+ 0x9fff9fff9fff9fff, 0xaaaaaaaaaaaaaaaa, 0xdddddddddddddddd,
+ 0xe0000000000001ff, 0xf800000000000000, 0xf8000000000001ff,
+ 0xf807f807f807f807, 0xfefefefefefefefe, 0xfffefffefffefffe,
+ 0xfffff807fffff807, 0xfffff9fffffff9ff, 0xfffffc0ffffffc0f,
+ 0xfffffc0fffffffff, 0xfffffefffffffeff, 0xfffffeffffffffff,
+ 0xffffff8000000000, 0xfffffffefffffffe, 0xffffffffefffffff,
+ 0xfffffffff9ffffff, 0xffffffffff800000, 0xffffffffffffc0ff,
+ 0xfffffffffffffffe};
+
+
+// ARM64 arithmetic instructions.
+struct AddSub {
+ MachInst2 mi;
+ ArchOpcode negate_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const AddSub& op) {
+ return os << op.mi;
+}
+
+
+static const AddSub kAddSubInstructions[] = {
+ {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
+ kArm64Sub32},
+ {{&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
+ kArm64Sub},
+ {{&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
+ kArm64Add32},
+ {{&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64},
+ kArm64Add}};
+
+
+// ARM64 Add/Sub immediates: an unsigned 12-bit immediate, optionally shifted
+// left by 12 bits. Below is a combination of a random subset and some edge
+// values.
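+// For example, 4095 (0xfff) uses the unshifted form, while 16773120
+// (0xfff000) is 0xfff shifted left by 12.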
+static const int32_t kAddSubImmediates[] = {
+ 0, 1, 69, 493, 599, 701, 719,
+ 768, 818, 842, 945, 1246, 1286, 1429,
+ 1669, 2171, 2179, 2182, 2254, 2334, 2338,
+ 2343, 2396, 2449, 2610, 2732, 2855, 2876,
+ 2944, 3377, 3458, 3475, 3476, 3540, 3574,
+ 3601, 3813, 3871, 3917, 4095, 4096, 16384,
+ 364544, 462848, 970752, 1523712, 1863680, 2363392, 3219456,
+ 3280896, 4247552, 4526080, 4575232, 4960256, 5505024, 5894144,
+ 6004736, 6193152, 6385664, 6795264, 7114752, 7233536, 7348224,
+ 7499776, 7573504, 7729152, 8634368, 8937472, 9465856, 10354688,
+ 10682368, 11059200, 11460608, 13168640, 13176832, 14336000, 15028224,
+ 15597568, 15892480, 16773120};
+
+
+// ARM64 flag setting data processing instructions.
+static const MachInst2 kDPFlagSetInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Tst, kMachInt64}};
+
+
+// ARM64 arithmetic with overflow instructions.
+static const MachInst2 kOvfAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+ kArm64Add32, kMachInt32},
+ {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+ kArm64Sub32, kMachInt32}};
+
+
+// ARM64 shift instructions.
+static const Shift kShiftInstructions[] = {
+ {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+ kMode_Operand2_R_LSL_I},
+ {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+ kMode_Operand2_R_LSL_I},
+ {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+ kMode_Operand2_R_LSR_I},
+ {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+ kMode_Operand2_R_LSR_I},
+ {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+ kMode_Operand2_R_ASR_I},
+ {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
+ kMode_Operand2_R_ASR_I},
+ {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+ kMode_Operand2_R_ROR_I},
+ {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64},
+ kMode_Operand2_R_ROR_I}};
+
+
+// ARM64 Mul/Div instructions.
+static const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32, kMachInt32},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kArm64Udiv, kMachInt64}};
+
+
+// ARM64 FP arithmetic instructions.
+static const MachInst2 kFPArithInstructions[] = {
+ {&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
+ kMachFloat64},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
+ kMachFloat64},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
+ kMachFloat64},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
+ kMachFloat64}};
+
+
+struct FPCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
+ return os << cmp.mi;
+}
+
+
+// ARM64 FP comparison instructions.
+static const FPCmp kFPCmpInstructions[] = {
+ {{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
+ kMachFloat64},
+ kUnorderedEqual},
+ {{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+ kArm64Float64Cmp, kMachFloat64},
+ kUnorderedLessThan},
+ {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kArm64Float64Cmp, kMachFloat64},
+ kUnorderedLessThanOrEqual}};
+
+
+struct Conversion {
+ // The machine_type field in MachInst1 represents the destination type.
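+  // For example, ChangeInt32ToFloat64 below has machine_type kMachFloat64 and
+  // src_machine_type kMachInt32.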
+ MachInst1 mi;
+ MachineType src_machine_type;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
+ return os << conv.mi;
+}
+
+
+// ARM64 type conversion instructions.
+static const Conversion kConversionInstructions[] = {
+ {{&RawMachineAssembler::ChangeFloat32ToFloat64, "ChangeFloat32ToFloat64",
+ kArm64Float32ToFloat64, kMachFloat64},
+ kMachFloat32},
+ {{&RawMachineAssembler::TruncateFloat64ToFloat32,
+ "TruncateFloat64ToFloat32", kArm64Float64ToFloat32, kMachFloat32},
+ kMachFloat64},
+ {{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
+ kArm64Sxtw, kMachInt64},
+ kMachInt32},
+ {{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
+ kArm64Mov32, kMachUint64},
+ kMachUint32},
+ {{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
+ kArm64Mov32, kMachInt32},
+ kMachInt64},
+ {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+ kArm64Int32ToFloat64, kMachFloat64},
+ kMachInt32},
+ {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+ kArm64Uint32ToFloat64, kMachFloat64},
+ kMachUint32},
+ {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+ kArm64Float64ToInt32, kMachInt32},
+ kMachFloat64},
+ {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+ kArm64Float64ToUint32, kMachUint32},
+ kMachFloat64}};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Logical instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorLogicalTest, Immediate) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ // TODO(all): Add support for testing 64-bit immediates.
+ if (type == kMachInt32) {
+ // Immediate on the right.
+ TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+
+ // Immediate on the left; all logical ops should commute.
+ TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test shifted operands whose width matches the instruction.
+ if (shift.mi.machine_type != type) continue;
+
+ TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.mi.constructor)(m.Parameter(1),
+ BuildConstant(m, type, imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+
+ TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1),
+ BuildConstant(m, type, imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Add and Sub instructions.
+
+typedef InstructionSelectorTestWithParam<AddSub> InstructionSelectorAddSubTest;
+
+
+TEST_P(InstructionSelectorAddSubTest, Parameter) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, type, type);
+ m.Return(
+ (m.*dpi.mi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, NegImmediateOnRight) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+ m.Return(
+ (m.*dpi.mi.constructor)(m.Parameter(0), BuildConstant(m, type, -imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.negate_arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ShiftByImmediateOnRight) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test shifted operands whose width matches the instruction.
+ if (shift.mi.machine_type != type) continue;
+
+ if ((shift.mi.arch_opcode == kArm64Ror32) ||
+ (shift.mi.arch_opcode == kArm64Ror)) {
+ // Not supported by add/sub instructions.
+ continue;
+ }
+
+ TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0),
+ (m.*shift.mi.constructor)(m.Parameter(1),
+ BuildConstant(m, type, imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
+ ::testing::ValuesIn(kAddSubInstructions));
+
+
+TEST_F(InstructionSelectorTest, AddImmediateOnLeft) {
+ {
+ // 32-bit add.
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+ {
+ // 64-bit add.
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Int64Add(m.Int64Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
+ // Subtraction with zero on the left maps to Neg.
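+  // On ARM64, NEG is an alias of SUB from the zero register, so only the one
+  // non-constant input is needed.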
+ {
+ // 32-bit subtract.
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ // 64-bit subtract.
+ StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddNegImmediateOnLeft) {
+ {
+ // 32-bit add.
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Int32Constant(-imm), m.Parameter(0)));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+ {
+ // 64-bit add.
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Int64Add(m.Int64Constant(-imm), m.Parameter(0)));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Sub, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
+ // 32-bit add.
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != kMachInt32) continue;
+ if (shift.mi.arch_opcode == kArm64Ror32) continue;
+
+ TRACED_FORRANGE(int, imm, 0, 31) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Int32Add(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+
+ // 64-bit add.
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != kMachInt64) continue;
+ if (shift.mi.arch_opcode == kArm64Ror) continue;
+
+ TRACED_FORRANGE(int, imm, 0, 63) {
+ StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+      m.Return(m.Int64Add(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Data processing controlled branches.
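+// The flag-setting instruction doubles as the branch input: it is emitted
+// with kFlags_branch and the branch tests the not-equal condition.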
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorDPFlagSetTest;
+
+
+TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorDPFlagSetTest,
+ ::testing::ValuesIn(kDPFlagSetInstructions));
+
+
+TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnRight) {
+ TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
+ // Skip the cases where the instruction selector would use tbz/tbnz.
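+    // tbz/tbnz test a single bit, so masks with exactly one bit set take that
+    // path instead of tst (see the one-bit-mask tests below).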
+ if (base::bits::CountPopulation32(imm) == 1) continue;
+
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnRight) {
+ TRACED_FOREACH(int64_t, imm, kLogical64Immediates) {
+ // Skip the cases where the instruction selector would use tbz/tbnz.
+ if (base::bits::CountPopulation64(imm) == 1) continue;
+
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ MLabel a, b;
+ m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(imm)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnLeft) {
+ TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
+ // Skip the cases where the instruction selector would use tbz/tbnz.
+ if (base::bits::CountPopulation32(imm) == 1) continue;
+
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ ASSERT_LE(1U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnLeft) {
+ TRACED_FOREACH(int64_t, imm, kLogical64Immediates) {
+ // Skip the cases where the instruction selector would use tbz/tbnz.
+ if (base::bits::CountPopulation64(imm) == 1) continue;
+
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ MLabel a, b;
+ m.Branch(m.Word64And(m.Int64Constant(imm), m.Parameter(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ ASSERT_LE(1U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ ASSERT_LE(1U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(mask)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(
+ m.Word32BinaryNot(m.Word32And(m.Parameter(0), m.Int32Constant(mask))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32And(m.Int32Constant(mask), m.Parameter(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(
+ m.Word32BinaryNot(m.Word32And(m.Int32Constant(mask), m.Parameter(0))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+    uint64_t mask = 1ULL << bit;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ MLabel a, b;
+ m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 63) {
+    uint64_t mask = 1ULL << bit;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ MLabel a, b;
+ m.Branch(
+ m.Word64BinaryNot(m.Word64And(m.Parameter(0), m.Int64Constant(mask))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+    uint64_t mask = 1ULL << bit;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ MLabel a, b;
+ m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 63) {
+    uint64_t mask = 1ULL << bit;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ MLabel a, b;
+ m.Branch(
+ m.Word64BinaryNot(m.Word64And(m.Int64Constant(mask), m.Parameter(0))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Add and subtract instructions with overflow.
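+// Projection 0 of an <op>WithOverflow node is the result value; projection 1
+// is the overflow flag.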
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorOvfAddSubTest;
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, OvfParameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return(
+ m.Projection(1, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, OvfImmediateOnRight) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, type, type);
+ m.Return(m.Projection(
+ 1, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, ValParameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return(
+ m.Projection(0, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, ValImmediateOnRight) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, type, type);
+ m.Return(m.Projection(
+ 0, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BothParameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BothImmediateOnRight) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, type, type);
+ Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithParameters) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ MLabel a, b;
+ Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Projection(1, n), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&b);
+ m.Return(m.Projection(0, n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, type, type);
+ MLabel a, b;
+ Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+ m.Branch(m.Projection(1, n), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&b);
+ m.Return(m.Projection(0, n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorOvfAddSubTest,
+ ::testing::ValuesIn(kOvfAddSubInstructions));
+
+
+TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+ m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+ Stream s = m.Build();
+
+ ASSERT_LE(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+ m.Branch(m.Projection(1, n), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&b);
+ m.Return(m.Projection(0, n));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ ASSERT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Shift instructions.
+
+
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Parameter) {
+ const Shift shift = GetParam();
+ const MachineType type = shift.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(shift.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+ const Shift shift = GetParam();
+ const MachineType type = shift.mi.machine_type;
+ TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(shift.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Mul and Div instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorMulDivTest;
+
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
+
+
+namespace {
+
+struct MulDPInst {
+ const char* mul_constructor_name;
+ Node* (RawMachineAssembler::*mul_constructor)(Node*, Node*);
+ Node* (RawMachineAssembler::*add_constructor)(Node*, Node*);
+ Node* (RawMachineAssembler::*sub_constructor)(Node*, Node*);
+ ArchOpcode add_arch_opcode;
+ ArchOpcode sub_arch_opcode;
+ ArchOpcode neg_arch_opcode;
+ MachineType machine_type;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MulDPInst& inst) {
+ return os << inst.mul_constructor_name;
+}
+
+} // namespace
+
+
+static const MulDPInst kMulDPInstructions[] = {
+ {"Int32Mul", &RawMachineAssembler::Int32Mul, &RawMachineAssembler::Int32Add,
+ &RawMachineAssembler::Int32Sub, kArm64Madd32, kArm64Msub32, kArm64Mneg32,
+ kMachInt32},
+ {"Int64Mul", &RawMachineAssembler::Int64Mul, &RawMachineAssembler::Int64Add,
+ &RawMachineAssembler::Int64Sub, kArm64Madd, kArm64Msub, kArm64Mneg,
+ kMachInt64}};
+
+
+typedef InstructionSelectorTestWithParam<MulDPInst>
+ InstructionSelectorIntDPWithIntMulTest;
+
+
+TEST_P(InstructionSelectorIntDPWithIntMulTest, AddWithMul) {
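+  // An add with a multiply feeding either operand should be selected as a
+  // single madd instruction.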
+ const MulDPInst mdpi = GetParam();
+ const MachineType type = mdpi.machine_type;
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = (m.*mdpi.mul_constructor)(m.Parameter(1), m.Parameter(2));
+ m.Return((m.*mdpi.add_constructor)(m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = (m.*mdpi.mul_constructor)(m.Parameter(0), m.Parameter(1));
+ m.Return((m.*mdpi.add_constructor)(n, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorIntDPWithIntMulTest, SubWithMul) {
+ const MulDPInst mdpi = GetParam();
+ const MachineType type = mdpi.machine_type;
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = (m.*mdpi.mul_constructor)(m.Parameter(1), m.Parameter(2));
+ m.Return((m.*mdpi.sub_constructor)(m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.sub_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
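+  // A multiply where one operand is (0 - x) should be selected as a single
+  // mneg instruction.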
+ const MulDPInst mdpi = GetParam();
+ const MachineType type = mdpi.machine_type;
+ {
+ StreamBuilder m(this, type, type, type);
+ Node* n =
+ (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(0));
+ m.Return((m.*mdpi.mul_constructor)(n, m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ Node* n =
+ (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(1));
+ m.Return((m.*mdpi.mul_constructor)(m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorIntDPWithIntMulTest,
+ ::testing::ValuesIn(kMulDPInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Floating point instructions.
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorFPArithTest;
+
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+ const MachInst2 fpa = GetParam();
+ StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+ m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ InstructionSelectorConversionTest;
+
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode ldr_opcode;
+ ArchOpcode str_opcode;
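+  // Immediate offsets expected to be matched directly by the load/store
+  // addressing mode.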
+ const int32_t immediates[20];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+ return os << memacc.type;
+}
+
+} // namespace
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8,
+ kArm64Ldrsb,
+ kArm64Strb,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001, 2121,
+ 2442, 4093, 4094, 4095}},
+ {kMachUint8,
+ kArm64Ldrb,
+ kArm64Strb,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001, 2121,
+ 2442, 4093, 4094, 4095}},
+ {kMachInt16,
+ kArm64Ldrsh,
+ kArm64Strh,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098, 4100,
+ 4242, 6786, 8188, 8190}},
+ {kMachUint16,
+ kArm64Ldrh,
+ kArm64Strh,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098, 4100,
+ 4242, 6786, 8188, 8190}},
+ {kMachInt32,
+ kArm64LdrW,
+ kArm64StrW,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
+ 3276, 3280, 16376, 16380}},
+ {kMachUint32,
+ kArm64LdrW,
+ kArm64StrW,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
+ 3276, 3280, 16376, 16380}},
+ {kMachInt64,
+ kArm64Ldr,
+ kArm64Str,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
+ 16384, 16392, 32752, 32760}},
+ {kMachUint64,
+ kArm64Ldr,
+ kArm64Str,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
+ 16384, 16392, 32752, 32760}},
+ {kMachFloat32,
+ kArm64LdrS,
+ kArm64StrS,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
+ 3276, 3280, 16376, 16380}},
+ {kMachFloat64,
+ kArm64LdrD,
+ kArm64StrD,
+ {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
+ 16384, 16392, 32752, 32760}}};
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Comparison instructions.
+
+static const MachInst2 kComparisonInstructions[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
+ {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
+};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorComparisonTest;
+
+
+TEST_P(InstructionSelectorComparisonTest, WithParameters) {
+ const MachInst2 cmp = GetParam();
+ const MachineType type = cmp.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
+ const MachInst2 cmp = GetParam();
+ const MachineType type = cmp.machine_type;
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into a tst instruction.
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+ m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into a tst instruction.
+ if (imm == 0) continue;
+ StreamBuilder m(this, type, type);
+ m.Return((m.*cmp.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorComparisonTest,
+ ::testing::ValuesIn(kComparisonInstructions));
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
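+  // Comparing a value against zero is selected as a tst of the operand with
+  // itself.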
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous
+
+
+static const MachInst2 kLogicalWithNotRHSs[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Bic32, kMachInt32},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Bic, kMachInt64},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Orn32, kMachInt32},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Orn, kMachInt64},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eon32, kMachInt32},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon, kMachInt64}};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorLogicalWithNotRHSTest;
+
+
+TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
+ const MachInst2 inst = GetParam();
+ const MachineType type = inst.machine_type;
+ // Test cases where RHS is Xor(x, -1).
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return((m.*inst.constructor)(
+ m.Parameter(0), m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return((m.*inst.constructor)(
+ m.Parameter(0), m.Word64Xor(m.Parameter(1), m.Int64Constant(-1))));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return((m.*inst.constructor)(
+ m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)), m.Parameter(1)));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return((m.*inst.constructor)(
+ m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)), m.Parameter(1)));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // Test cases where RHS is Not(x).
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return(
+ (m.*inst.constructor)(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return(
+ (m.*inst.constructor)(m.Parameter(0), m.Word64Not(m.Parameter(1))));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return(
+ (m.*inst.constructor)(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return(
+ (m.*inst.constructor)(m.Word64Not(m.Parameter(0)), m.Parameter(1)));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalWithNotRHSTest,
+ ::testing::ValuesIn(kLogicalWithNotRHSs));
+
+
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Not(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word64NotWithParameter) {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Not(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
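+      // Set random junk in the low lsb bits; the shift right by lsb discards
+      // them, so the And+Shr pair can still be matched to a single ubfx.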
+ uint32_t jnk = rng()->NextInt();
+ jnk >>= 32 - lsb;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk >>= 32 - lsb;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk >>= 64 - lsb;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
+ m.Int64Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk >>= 64 - lsb;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
+ m.Int64Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
+ TRACED_FORRANGE(int64_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int64_t, width, 1, 63) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb)),
+ m.Int64Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ int64_t actual_width = (lsb + width > 64) ? (64 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int64_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int64_t, width, 1, 63) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64And(m.Int64Constant(msk),
+ m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ int64_t actual_width = (lsb + width > 64) ? (64 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
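+  // Int32MulHigh is selected as smull followed by an arithmetic shift right
+  // by 32 to extract the high word of the 64-bit product.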
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Int32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Smull, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArm64Asr, s[1]->arch_opcode());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(32, s.ToInt64(s[1]->InputAt(1)));
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[1]->Output()));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
new file mode 100644
index 0000000000..17d65131d1
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
@@ -0,0 +1,456 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stubs.h"
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ChangeLoweringTest : public GraphTest {
+ public:
+ ChangeLoweringTest() : simplified_(zone()) {}
+ virtual ~ChangeLoweringTest() {}
+
+ virtual MachineType WordRepresentation() const = 0;
+
+ protected:
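+  // Offset of the float64 payload inside a HeapNumber, rescaled to the word
+  // size under test, with the heap-object tag stripped.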
+ int HeapNumberValueOffset() const {
+ STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
+ return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
+ kHeapObjectTag;
+ }
+ bool Is32() const { return WordRepresentation() == kRepWord32; }
+ int PointerSize() const {
+ switch (WordRepresentation()) {
+ case kRepWord32:
+ return 4;
+ case kRepWord64:
+ return 8;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+ }
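+  // Smi encoding for the word size under test: a 31-bit payload shifted left
+  // by one on 32-bit targets, a 32-bit payload shifted left by 32 on 64-bit
+  // targets.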
+ int SmiMaxValue() const { return -(SmiMinValue() + 1); }
+ int SmiMinValue() const {
+ return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
+ }
+ int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
+ int SmiShiftSize() const {
+ return Is32() ? SmiTagging<4>::SmiShiftSize()
+ : SmiTagging<8>::SmiShiftSize();
+ }
+ int SmiValueSize() const {
+ return Is32() ? SmiTagging<4>::SmiValueSize()
+ : SmiTagging<8>::SmiValueSize();
+ }
+
+ Node* Parameter(int32_t index = 0) {
+ return graph()->NewNode(common()->Parameter(index), graph()->start());
+ }
+
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(WordRepresentation());
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(graph(), common(), &javascript, &machine);
+ CompilationInfo info(isolate(), zone());
+ Linkage linkage(zone(), &info);
+ ChangeLowering reducer(&jsgraph, &linkage);
+ return reducer.Reduce(node);
+ }
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return IsCall(_, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
+ AllocateHeapNumberStub(isolate()).GetCode())),
+ IsNumberConstant(0.0), effect_matcher, control_matcher);
+ }
+ Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return IsLoad(kMachFloat64, value_matcher,
+ IsIntPtrConstant(HeapNumberValueOffset()), graph()->start(),
+ control_matcher);
+ }
+ Matcher<Node*> IsIntPtrConstant(int value) {
+ return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
+ }
+ Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
+ : IsWord64Equal(lhs_matcher, rhs_matcher);
+ }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Common.
+
+
+class ChangeLoweringCommonTest
+ : public ChangeLoweringTest,
+ public ::testing::WithParamInterface<MachineType> {
+ public:
+ virtual ~ChangeLoweringCommonTest() {}
+
+ virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+ return GetParam();
+ }
+};
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsSelect(static_cast<MachineType>(kTypeBool | kRepTagged), val,
+ IsTrueConstant(), IsFalseConstant()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
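+  // The lowering should allocate a HeapNumber and store the float64 value
+  // into it, with a Finish node tying the allocation and the store together.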
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* finish = reduction.replacement();
+ Capture<Node*> heap_number;
+ EXPECT_THAT(
+ finish,
+ IsFinish(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
+ IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsIntPtrConstant(HeapNumberValueOffset()), val,
+ CaptureEq(&heap_number), graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
+ Node* node =
+ graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
+ Reduction reduction = Reduce(node);
+ EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
+ ::testing::Values(kRepWord32, kRepWord64));
+
+
+// -----------------------------------------------------------------------------
+// 32-bit
+
+
+class ChangeLowering32Test : public ChangeLoweringTest {
+ public:
+ virtual ~ChangeLowering32Test() {}
+ virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+ return kRepWord32;
+ }
+};
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
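+  // On 32-bit targets the value is Smi-tagged via an overflow-checked add of
+  // the value to itself; if the add overflows, the value is boxed in a freshly
+  // allocated HeapNumber instead.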
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> add, branch, heap_number, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachAnyTagged,
+ IsFinish(AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_true))),
+ IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsIntPtrConstant(HeapNumberValueOffset()),
+ IsChangeInt32ToFloat64(val),
+ CaptureEq(&heap_number), CaptureEq(&if_true))),
+ IsProjection(
+ 0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(CaptureEq(&branch),
+ IsBranch(IsProjection(1, CaptureEq(&add)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
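+  // The lowering branches on the Smi tag bit: heap numbers take the load
+  // path, Smis are untagged with an arithmetic shift and converted to float64.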
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
+ IsChangeInt32ToFloat64(
+ IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
+ IsMerge(
+ AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachInt32,
+ IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
+ IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachUint32,
+ IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
+ IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, heap_number, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
+ IsFinish(AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+ IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsInt32Constant(HeapNumberValueOffset()),
+ IsChangeUint32ToFloat64(val),
+ CaptureEq(&heap_number), CaptureEq(&if_false))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ val, IsInt32Constant(SmiMaxValue())),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// 64-bit
+
+
+class ChangeLowering64Test : public ChangeLoweringTest {
+ public:
+ virtual ~ChangeLowering64Test() {}
+ virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+ return kRepWord64;
+ }
+};
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ EXPECT_THAT(reduction.replacement(),
+ IsWord64Shl(IsChangeInt32ToInt64(val),
+ IsInt64Constant(SmiShiftAmount())));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
+ IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
+ IsWord64Sar(val, IsInt64Constant(SmiShiftAmount())))),
+ IsMerge(
+ AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachInt32,
+ IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachUint32,
+ IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, heap_number, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
+ IsInt64Constant(SmiShiftAmount())),
+ IsFinish(AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+ IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsInt64Constant(HeapNumberValueOffset()),
+ IsChangeUint32ToFloat64(val),
+ CaptureEq(&heap_number), CaptureEq(&if_false))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ val, IsInt32Constant(SmiMaxValue())),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
new file mode 100644
index 0000000000..2c88c4bbd7
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -0,0 +1,292 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include <limits>
+
+#include "src/compiler/operator-properties-inl.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// -----------------------------------------------------------------------------
+// Shared operators.
+
+
+namespace {
+
+struct SharedOperator {
+ const Operator* (CommonOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ Operator::Properties properties;
+ int value_input_count;
+ int effect_input_count;
+ int control_input_count;
+ int effect_output_count;
+ int control_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
+ return os << IrOpcode::Mnemonic(fop.opcode);
+}
+
+
+const SharedOperator kSharedOperators[] = {
+#define SHARED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, effect_output_count, control_output_count) \
+ { \
+ &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties, \
+ value_input_count, effect_input_count, control_input_count, \
+ effect_output_count, control_output_count \
+ }
+ SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
+ SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
+ SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
+ SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
+ SHARED(Throw, Operator::kFoldable, 1, 1, 1, 0, 1),
+ SHARED(Return, Operator::kNoProperties, 1, 1, 1, 0, 1)
+#undef SHARED
+};
+
+
+class CommonSharedOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<SharedOperator> {};
+
+} // namespace
+
+
+TEST_P(CommonSharedOperatorTest, InstancesAreGloballyShared) {
+ const SharedOperator& sop = GetParam();
+ CommonOperatorBuilder common1(zone());
+ CommonOperatorBuilder common2(zone());
+ EXPECT_EQ((common1.*sop.constructor)(), (common2.*sop.constructor)());
+}
+
+
+TEST_P(CommonSharedOperatorTest, NumberOfInputsAndOutputs) {
+ CommonOperatorBuilder common(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (common.*sop.constructor)();
+
+ EXPECT_EQ(sop.value_input_count, op->ValueInputCount());
+ EXPECT_EQ(sop.effect_input_count, op->EffectInputCount());
+ EXPECT_EQ(sop.control_input_count, op->ControlInputCount());
+ EXPECT_EQ(
+ sop.value_input_count + sop.effect_input_count + sop.control_input_count,
+ OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
+ EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
+}
+
+
+TEST_P(CommonSharedOperatorTest, OpcodeIsCorrect) {
+ CommonOperatorBuilder common(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (common.*sop.constructor)();
+ EXPECT_EQ(sop.opcode, op->opcode());
+}
+
+
+TEST_P(CommonSharedOperatorTest, Properties) {
+ CommonOperatorBuilder common(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (common.*sop.constructor)();
+ EXPECT_EQ(sop.properties, op->properties());
+}
+
+
+INSTANTIATE_TEST_CASE_P(CommonOperatorTest, CommonSharedOperatorTest,
+ ::testing::ValuesIn(kSharedOperators));
+
+
+// -----------------------------------------------------------------------------
+// Other operators.
+
+
+namespace {
+
+class CommonOperatorTest : public TestWithZone {
+ public:
+ CommonOperatorTest() : common_(zone()) {}
+ virtual ~CommonOperatorTest() {}
+
+ CommonOperatorBuilder* common() { return &common_; }
+
+ private:
+ CommonOperatorBuilder common_;
+};
+
+
+const int kArguments[] = {1, 5, 6, 42, 100, 10000, 65000};
+
+
+const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
+ std::numeric_limits<float>::min(),
+ -1.0f,
+ -0.0f,
+ 0.0f,
+ 1.0f,
+ std::numeric_limits<float>::max(),
+ std::numeric_limits<float>::infinity(),
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::signaling_NaN()};
+
+
+const double kDoubleValues[] = {-std::numeric_limits<double>::infinity(),
+ std::numeric_limits<double>::min(),
+ -1.0,
+ -0.0,
+ 0.0,
+ 1.0,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity(),
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::signaling_NaN()};
+
+
+const BranchHint kHints[] = {BranchHint::kNone, BranchHint::kTrue,
+ BranchHint::kFalse};
+
+} // namespace
+
+
+TEST_F(CommonOperatorTest, Branch) {
+ TRACED_FOREACH(BranchHint, hint, kHints) {
+ const Operator* const op = common()->Branch(hint);
+ EXPECT_EQ(IrOpcode::kBranch, op->opcode());
+ EXPECT_EQ(Operator::kFoldable, op->properties());
+ EXPECT_EQ(hint, BranchHintOf(op));
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(2, op->ControlOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Select) {
+ static const MachineType kTypes[] = {
+ kMachInt8, kMachUint8, kMachInt16, kMachUint16,
+ kMachInt32, kMachUint32, kMachInt64, kMachUint64,
+ kMachFloat32, kMachFloat64, kMachAnyTagged};
+ TRACED_FOREACH(MachineType, type, kTypes) {
+ TRACED_FOREACH(BranchHint, hint, kHints) {
+ const Operator* const op = common()->Select(type, hint);
+ EXPECT_EQ(IrOpcode::kSelect, op->opcode());
+ EXPECT_EQ(Operator::kPure, op->properties());
+ EXPECT_EQ(type, SelectParametersOf(op).type());
+ EXPECT_EQ(hint, SelectParametersOf(op).hint());
+ EXPECT_EQ(3, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(0, op->ControlInputCount());
+ EXPECT_EQ(3, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(1, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+ }
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Float32Constant) {
+ TRACED_FOREACH(float, value, kFloatValues) {
+ const Operator* op = common()->Float32Constant(value);
+ EXPECT_PRED2(base::bit_equal_to<float>(), value, OpParameter<float>(op));
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
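+  // Constant operators compare equal exactly when the bit patterns match, so
+  // -0.0f vs 0.0f and differing NaN payloads produce distinct operators.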
+ TRACED_FOREACH(float, v1, kFloatValues) {
+ TRACED_FOREACH(float, v2, kFloatValues) {
+ const Operator* op1 = common()->Float32Constant(v1);
+ const Operator* op2 = common()->Float32Constant(v2);
+ EXPECT_EQ(bit_cast<uint32_t>(v1) == bit_cast<uint32_t>(v2),
+ op1->Equals(op2));
+ }
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Float64Constant) {
+ TRACED_FOREACH(double, value, kFloatValues) {
+ const Operator* op = common()->Float64Constant(value);
+ EXPECT_PRED2(base::bit_equal_to<double>(), value, OpParameter<double>(op));
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
+ TRACED_FOREACH(double, v1, kFloatValues) {
+ TRACED_FOREACH(double, v2, kFloatValues) {
+ const Operator* op1 = common()->Float64Constant(v1);
+ const Operator* op2 = common()->Float64Constant(v2);
+ EXPECT_EQ(bit_cast<uint64_t>(v1) == bit_cast<uint64_t>(v2),
+ op1->Equals(op2));
+ }
+ }
+}
+
+
+TEST_F(CommonOperatorTest, NumberConstant) {
+ TRACED_FOREACH(double, value, kFloatValues) {
+ const Operator* op = common()->NumberConstant(value);
+ EXPECT_PRED2(base::bit_equal_to<double>(), value, OpParameter<double>(op));
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
+ TRACED_FOREACH(double, v1, kFloatValues) {
+ TRACED_FOREACH(double, v2, kFloatValues) {
+ const Operator* op1 = common()->NumberConstant(v1);
+ const Operator* op2 = common()->NumberConstant(v2);
+ EXPECT_EQ(bit_cast<uint64_t>(v1) == bit_cast<uint64_t>(v2),
+ op1->Equals(op2));
+ }
+ }
+}
+
+
+TEST_F(CommonOperatorTest, ValueEffect) {
+ TRACED_FOREACH(int, arguments, kArguments) {
+ const Operator* op = common()->ValueEffect(arguments);
+ EXPECT_EQ(arguments, op->ValueInputCount());
+ EXPECT_EQ(arguments, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Finish) {
+ TRACED_FOREACH(int, arguments, kArguments) {
+ const Operator* op = common()->Finish(arguments);
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(arguments, op->EffectInputCount());
+ EXPECT_EQ(arguments + 1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/compiler-test-utils.h b/deps/v8/test/unittests/compiler/compiler-test-utils.h
new file mode 100644
index 0000000000..6ce28f9f94
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/compiler-test-utils.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_COMPILER_TEST_UTILS_H_
+#define V8_UNITTESTS_COMPILER_COMPILER_TEST_UTILS_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The TARGET_TEST(Case, Name) macro works just like
+// TEST(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST(Case, Name) TEST(Case, Name)
+#else
+#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TEST_F(Case, Name) macro works just like
+// TEST_F(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
+#else
+#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TEST_P(Case, Name) macro works just like
+// TEST_P(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
+#else
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TYPED_TEST(Case, Name) macro works just like
+// TYPED_TEST(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
+#else
+#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
+#endif
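+
+// Example usage (hypothetical test name):
+//   TARGET_TEST_F(InstructionSelectorTest, SomeTurboFanOnlyCase) { ... }
+// On non-TurboFan targets this expands to DISABLED_SomeTurboFanOnlyCase,
+// which gtest still compiles but skips by default.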
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_COMPILER_TEST_UTILS_H_
diff --git a/deps/v8/test/unittests/compiler/diamond-unittest.cc b/deps/v8/test/unittests/compiler/diamond-unittest.cc
new file mode 100644
index 0000000000..c14886fbb7
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/diamond-unittest.cc
@@ -0,0 +1,161 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
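+// Diamond is a small helper that wires up the Branch/IfTrue/IfFalse/Merge
+// subgraph used for two-way control flow; the tests below check that wiring
+// as well as chaining, nesting and phi placement.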
+class DiamondTest : public GraphTest {
+ public:
+ DiamondTest() : GraphTest(5) {}
+};
+
+
+TEST_F(DiamondTest, SimpleDiamond) {
+ Node* p = Parameter(0);
+ Diamond d(graph(), common(), p);
+ EXPECT_THAT(d.branch, IsBranch(p, graph()->start()));
+ EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
+ EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
+ EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
+}
+
+
+TEST_F(DiamondTest, DiamondChainDiamond) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Diamond d0(graph(), common(), p0);
+ Diamond d1(graph(), common(), p1);
+ d1.Chain(d0);
+ EXPECT_THAT(d1.branch, IsBranch(p1, d0.merge));
+ EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start()));
+}
+
+
+TEST_F(DiamondTest, DiamondChainNode) {
+ Node* p1 = Parameter(1);
+ Diamond d1(graph(), common(), p1);
+ Node* other = graph()->NewNode(common()->Merge(0));
+ d1.Chain(other);
+ EXPECT_THAT(d1.branch, IsBranch(p1, other));
+}
+
+
+TEST_F(DiamondTest, DiamondChainN) {
+ Node* params[5] = {Parameter(0), Parameter(1), Parameter(2), Parameter(3),
+ Parameter(4)};
+ Diamond d[5] = {Diamond(graph(), common(), params[0]),
+ Diamond(graph(), common(), params[1]),
+ Diamond(graph(), common(), params[2]),
+ Diamond(graph(), common(), params[3]),
+ Diamond(graph(), common(), params[4])};
+
+ for (int i = 1; i < 5; i++) {
+ d[i].Chain(d[i - 1]);
+ EXPECT_THAT(d[i].branch, IsBranch(params[i], d[i - 1].merge));
+ }
+}
+
+
+TEST_F(DiamondTest, DiamondNested_true) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Diamond d0(graph(), common(), p0);
+ Diamond d1(graph(), common(), p1);
+
+ d1.Nest(d0, true);
+
+ EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start()));
+ EXPECT_THAT(d0.if_true, IsIfTrue(d0.branch));
+ EXPECT_THAT(d0.if_false, IsIfFalse(d0.branch));
+ EXPECT_THAT(d0.merge, IsMerge(d1.merge, d0.if_false));
+
+ EXPECT_THAT(d1.branch, IsBranch(p1, d0.if_true));
+ EXPECT_THAT(d1.if_true, IsIfTrue(d1.branch));
+ EXPECT_THAT(d1.if_false, IsIfFalse(d1.branch));
+ EXPECT_THAT(d1.merge, IsMerge(d1.if_true, d1.if_false));
+}
+
+
+TEST_F(DiamondTest, DiamondNested_false) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Diamond d0(graph(), common(), p0);
+ Diamond d1(graph(), common(), p1);
+
+ d1.Nest(d0, false);
+
+ EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start()));
+ EXPECT_THAT(d0.if_true, IsIfTrue(d0.branch));
+ EXPECT_THAT(d0.if_false, IsIfFalse(d0.branch));
+ EXPECT_THAT(d0.merge, IsMerge(d0.if_true, d1.merge));
+
+ EXPECT_THAT(d1.branch, IsBranch(p1, d0.if_false));
+ EXPECT_THAT(d1.if_true, IsIfTrue(d1.branch));
+ EXPECT_THAT(d1.if_false, IsIfFalse(d1.branch));
+ EXPECT_THAT(d1.merge, IsMerge(d1.if_true, d1.if_false));
+}
+
+
+TEST_F(DiamondTest, DiamondPhis) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* p2 = Parameter(2);
+ Diamond d(graph(), common(), p0);
+
+ MachineType types[] = {kMachAnyTagged, kMachUint32, kMachInt32};
+
+ for (size_t i = 0; i < arraysize(types); i++) {
+ Node* phi = d.Phi(types[i], p1, p2);
+
+ EXPECT_THAT(d.branch, IsBranch(p0, graph()->start()));
+ EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
+ EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
+ EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
+ EXPECT_THAT(phi, IsPhi(types[i], p1, p2, d.merge));
+ }
+}
+
+
+TEST_F(DiamondTest, DiamondEffectPhis) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* p2 = Parameter(2);
+ Diamond d(graph(), common(), p0);
+
+ Node* phi = d.EffectPhi(p1, p2);
+
+ EXPECT_THAT(d.branch, IsBranch(p0, graph()->start()));
+ EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
+ EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
+ EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
+ EXPECT_THAT(phi, IsEffectPhi(p1, p2, d.merge));
+}
+
+
+TEST_F(DiamondTest, BranchHint) {
+ Diamond dn(graph(), common(), Parameter(0));
+ CHECK(BranchHint::kNone == BranchHintOf(dn.branch->op()));
+
+ Diamond dt(graph(), common(), Parameter(0), BranchHint::kTrue);
+ CHECK(BranchHint::kTrue == BranchHintOf(dt.branch->op()));
+
+ Diamond df(graph(), common(), Parameter(0), BranchHint::kFalse);
+ CHECK(BranchHint::kFalse == BranchHintOf(df.branch->op()));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
new file mode 100644
index 0000000000..88f25f7b1b
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -0,0 +1,123 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/operator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+using testing::DefaultValue;
+using testing::Return;
+using testing::Sequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct TestOperator : public Operator {
+ TestOperator(Operator::Opcode opcode, Operator::Properties properties,
+ size_t value_in, size_t value_out)
+ : Operator(opcode, properties, "TestOp", value_in, 0, 0, value_out, 0,
+ 0) {}
+};
+
+
+namespace {
+
+TestOperator OP0(0, Operator::kNoWrite, 0, 1);
+TestOperator OP1(1, Operator::kNoProperties, 1, 1);
+
+
+struct MockReducer : public Reducer {
+ MOCK_METHOD1(Reduce, Reduction(Node*));
+};
+
+} // namespace
+
+
+class GraphReducerTest : public TestWithZone {
+ public:
+ GraphReducerTest() : graph_(zone()) {}
+
+ static void SetUpTestCase() {
+ TestWithZone::SetUpTestCase();
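+    // Mocked Reduce() calls without an explicit action return NoChange().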
+ DefaultValue<Reduction>::Set(Reducer::NoChange());
+ }
+
+ static void TearDownTestCase() {
+ DefaultValue<Reduction>::Clear();
+ TestWithZone::TearDownTestCase();
+ }
+
+ protected:
+ void ReduceNode(Node* node, Reducer* r) {
+ GraphReducer reducer(graph());
+ reducer.AddReducer(r);
+ reducer.ReduceNode(node);
+ }
+
+ void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
+ GraphReducer reducer(graph());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.ReduceNode(node);
+ }
+
+ void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
+ GraphReducer reducer(graph());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.AddReducer(r3);
+ reducer.ReduceNode(node);
+ }
+
+ Graph* graph() { return &graph_; }
+
+ private:
+ Graph graph_;
+};
+
+
+TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
+ StrictMock<MockReducer> r;
+ Node* node0 = graph()->NewNode(&OP0);
+ Node* node1 = graph()->NewNode(&OP1, node0);
+ Node* node2 = graph()->NewNode(&OP1, node0);
+ EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
+ ReduceNode(node1, &r);
+ EXPECT_FALSE(node0->IsDead());
+ EXPECT_TRUE(node1->IsDead());
+ EXPECT_FALSE(node2->IsDead());
+}
+
+
+TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
+ StrictMock<MockReducer> r1, r2;
+ Node* node0 = graph()->NewNode(&OP0);
+ EXPECT_CALL(r1, Reduce(node0));
+ EXPECT_CALL(r2, Reduce(node0));
+ ReduceNode(node0, &r1, &r2);
+}
+
+
+TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
+ Sequence s1, s2, s3;
+ StrictMock<MockReducer> r1, r2, r3;
+ Node* node0 = graph()->NewNode(&OP0);
+ EXPECT_CALL(r1, Reduce(node0));
+ EXPECT_CALL(r2, Reduce(node0));
+ EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2, s3).WillOnce(
+ Return(Reducer::Changed(node0)));
+ EXPECT_CALL(r1, Reduce(node0)).InSequence(s1);
+ EXPECT_CALL(r2, Reduce(node0)).InSequence(s2);
+ EXPECT_CALL(r3, Reduce(node0)).InSequence(s3);
+ ReduceNode(node0, &r1, &r2, &r3);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
new file mode 100644
index 0000000000..2cfd23a125
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/graph-unittest.h"
+
+#include <ostream> // NOLINT(readability/streams)
+
+#include "src/compiler/node-properties-inl.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
+ graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
+}
+
+
+GraphTest::~GraphTest() {}
+
+
+Node* GraphTest::Parameter(int32_t index) {
+ return graph()->NewNode(common()->Parameter(index), graph()->start());
+}
+
+
+Node* GraphTest::Float32Constant(volatile float value) {
+ return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* GraphTest::Float64Constant(volatile double value) {
+ return graph()->NewNode(common()->Float64Constant(value));
+}
+
+
+Node* GraphTest::Int32Constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+}
+
+
+Node* GraphTest::Int64Constant(int64_t value) {
+ return graph()->NewNode(common()->Int64Constant(value));
+}
+
+
+Node* GraphTest::NumberConstant(volatile double value) {
+ return graph()->NewNode(common()->NumberConstant(value));
+}
+
+
+Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
+ return HeapConstant(Unique<HeapObject>::CreateUninitialized(value));
+}
+
+
+Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
+ Node* node = graph()->NewNode(common()->HeapConstant(value));
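+  // Type the constant so that typed graph tests see precise bounds for it.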
+ Type* type = Type::Constant(value.handle(), zone());
+ NodeProperties::SetBounds(node, Bounds(type));
+ return node;
+}
+
+
+Node* GraphTest::FalseConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Node* GraphTest::TrueConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+
+Node* GraphTest::UndefinedConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsFalseConstant() {
+ return IsHeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsTrueConstant() {
+ return IsHeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
new file mode 100644
index 0000000000..e63d9485de
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/typer.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+template <class T>
+class Handle;
+class HeapObject;
+template <class T>
+class Unique;
+
+namespace compiler {
+
+using ::testing::Matcher;
+
+
+class GraphTest : public TestWithContext, public TestWithZone {
+ public:
+ explicit GraphTest(int parameters = 1);
+ virtual ~GraphTest();
+
+ protected:
+ Node* Parameter(int32_t index);
+ Node* Float32Constant(volatile float value);
+ Node* Float64Constant(volatile double value);
+ Node* Int32Constant(int32_t value);
+ Node* Uint32Constant(uint32_t value) {
+ return Int32Constant(bit_cast<int32_t>(value));
+ }
+ Node* Int64Constant(int64_t value);
+ Node* NumberConstant(volatile double value);
+ Node* HeapConstant(const Handle<HeapObject>& value);
+ Node* HeapConstant(const Unique<HeapObject>& value);
+ Node* FalseConstant();
+ Node* TrueConstant();
+ Node* UndefinedConstant();
+
+ Matcher<Node*> IsFalseConstant();
+ Matcher<Node*> IsTrueConstant();
+
+ CommonOperatorBuilder* common() { return &common_; }
+ Graph* graph() { return &graph_; }
+
+ private:
+ CommonOperatorBuilder common_;
+ Graph graph_;
+};
+
+
+class TypedGraphTest : public GraphTest {
+ public:
+ explicit TypedGraphTest(int parameters = 1)
+ : GraphTest(parameters), typer_(graph(), MaybeHandle<Context>()) {}
+
+ protected:
+ Typer* typer() { return &typer_; }
+
+ private:
+ Typer typer_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
new file mode 100644
index 0000000000..d1a17260a2
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -0,0 +1,606 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+ kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+
+} // namespace
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Better left operand for commutative binops
+
+
+TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* param1 = m.Parameter(0);
+ Node* param2 = m.Parameter(1);
+ Node* add = m.Int32Add(param1, param2);
+ m.Return(m.Int32Add(add, param1));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* param1 = m.Parameter(0);
+ Node* param2 = m.Parameter(1);
+ Node* mul = m.Int32Mul(param1, param2);
+ m.Return(m.Int32Mul(mul, param1));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachUint32);
+ m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEUint32ToFloat64, s[0]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+ return os << memacc.type;
+}
+
+
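+// Each entry pairs a machine type with the IA-32 opcodes expected for a load
+// and a store of that type (sign-/zero-extending moves for sub-word types).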
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8, kIA32Movsxbl, kIA32Movb},
+ {kMachUint8, kIA32Movzxbl, kIA32Movb},
+ {kMachInt16, kIA32Movsxwl, kIA32Movw},
+ {kMachUint16, kIA32Movzxwl, kIA32Movw},
+ {kMachInt32, kIA32Movl, kIA32Movl},
+ {kMachUint32, kIA32Movl, kIA32Movl},
+ {kMachFloat32, kIA32Movss, kIA32Movss},
+ {kMachFloat64, kIA32Movsd, kIA32Movsd}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, base, kImmediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ if (base == 0) {
+ ASSERT_EQ(1U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, kImmediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ if (index == 0) {
+ ASSERT_EQ(1U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, base, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ if (base == 0) {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ if (index == 0) {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// AddressingMode for loads and stores.
+
+
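+// Mode names use the IA-32 operand shorthand: M = memory, R = base register,
+// 1/2/4/8 = index register scaled by that factor, I = immediate displacement
+// (e.g. kMode_MR4I addresses [base + index * 4 + displacement]).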
+class AddressingModeUnitTest : public InstructionSelectorTest {
+ public:
+ AddressingModeUnitTest() : m(NULL) { Reset(); }
+ ~AddressingModeUnitTest() { delete m; }
+
+ void Run(Node* base, Node* index, AddressingMode mode) {
+ Node* load = m->Load(kMachInt32, base, index);
+ m->Store(kMachInt32, base, index, load);
+ m->Return(m->Int32Constant(0));
+ Stream s = m->Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(mode, s[0]->addressing_mode());
+ EXPECT_EQ(mode, s[1]->addressing_mode());
+ }
+
+ Node* zero;
+ Node* null_ptr;
+ Node* non_zero;
+ Node* base_reg; // opaque value to generate base as register
+ Node* index_reg; // opaque value to generate index as register
+ Node* scales[4];
+ StreamBuilder* m;
+
+ void Reset() {
+ delete m;
+ m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
+ zero = m->Int32Constant(0);
+ null_ptr = m->Int32Constant(0);
+ non_zero = m->Int32Constant(127);
+ base_reg = m->Parameter(0);
+ index_reg = m->Parameter(0);
+
+ scales[0] = m->Int32Constant(1);
+ scales[1] = m->Int32Constant(2);
+ scales[2] = m->Int32Constant(4);
+ scales[3] = m->Int32Constant(8);
+ }
+};
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
+ Node* base = base_reg;
+ Node* index = zero;
+ Run(base, index, kMode_MR);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
+ Node* base = base_reg;
+ Node* index = non_zero;
+ Run(base, index, kMode_MRI);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
+ Node* base = base_reg;
+ Node* index = index_reg;
+ Run(base, index, kMode_MR1);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
+ AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = base_reg;
+ Node* index = m->Int32Mul(index_reg, scales[i]);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
+ Node* base = base_reg;
+ Node* index = m->Int32Add(index_reg, non_zero);
+ Run(base, index, kMode_MR1I);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
+ AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = base_reg;
+ Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
+ Node* base = null_ptr;
+ Node* index = index_reg;
+ Run(base, index, kMode_M1);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
+ AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = null_ptr;
+ Node* index = m->Int32Mul(index_reg, scales[i]);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
+ Node* base = null_ptr;
+ Node* index = m->Int32Add(index_reg, non_zero);
+ Run(base, index, kMode_M1I);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
+ AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = null_ptr;
+ Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
+ Node* bases[] = {null_ptr, non_zero};
+ Node* indices[] = {zero, non_zero};
+ for (size_t i = 0; i < arraysize(bases); ++i) {
+ for (size_t j = 0; j < arraysize(indices); ++j) {
+ Reset();
+ Node* base = bases[i];
+ Node* index = indices[j];
+ Run(base, index, kMode_MI);
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Multiplication.
+
+
+namespace {
+
+struct MultParam {
+ int value;
+ bool lea_expected;
+ AddressingMode addressing_mode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MultParam& m) {
+ return os << m.value << "." << m.lea_expected << "." << m.addressing_mode;
+}
+
+
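+// Multiplications by 1, 2, 4 and 8 map onto a scaled index alone (kMode_M1..
+// kMode_M8); 3, 5 and 9 are handled as x + x * {2, 4, 8} (kMode_MR2/MR4/MR8),
+// so a single lea suffices. All other constants fall back to imul.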
+const MultParam kMultParams[] = {{-1, false, kMode_None},
+ {0, false, kMode_None},
+ {1, true, kMode_M1},
+ {2, true, kMode_M2},
+ {3, true, kMode_MR2},
+ {4, true, kMode_M4},
+ {5, true, kMode_MR4},
+ {6, false, kMode_None},
+ {7, false, kMode_None},
+ {8, true, kMode_M8},
+ {9, true, kMode_MR8},
+ {10, false, kMode_None},
+ {11, false, kMode_None}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MultParam> InstructionSelectorMultTest;
+
+
+static unsigned InputCountForLea(AddressingMode mode) {
+ switch (mode) {
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I:
+ return 3U;
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I:
+ return 2U;
+ case kMode_MR1:
+ case kMode_MR2:
+ case kMode_MR4:
+ case kMode_MR8:
+ return 2U;
+ case kMode_M1:
+ case kMode_M2:
+ case kMode_M4:
+ case kMode_M8:
+ return 1U;
+ default:
+ UNREACHABLE();
+ return 0U;
+ }
+}
+
+
+static AddressingMode AddressingModeForAddMult(const MultParam& m) {
+ switch (m.addressing_mode) {
+ case kMode_MR1:
+ return kMode_MR1I;
+ case kMode_MR2:
+ return kMode_MR2I;
+ case kMode_MR4:
+ return kMode_MR4I;
+ case kMode_MR8:
+ return kMode_MR8I;
+ case kMode_M1:
+ return kMode_M1I;
+ case kMode_M2:
+ return kMode_M2I;
+ case kMode_M4:
+ return kMode_M4I;
+ case kMode_M8:
+ return kMode_M8I;
+ default:
+ UNREACHABLE();
+ return kMode_None;
+ }
+}
+
+
+TEST_P(InstructionSelectorMultTest, Mult32) {
+ const MultParam m_param = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* param = m.Parameter(0);
+ Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
+ m.Return(mult);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
+ if (m_param.lea_expected) {
+ EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
+ ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
+ } else {
+ EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ }
+ EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_P(InstructionSelectorMultTest, MultAdd32) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ const MultParam m_param = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* param = m.Parameter(0);
+ Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
+ m.Int32Constant(imm));
+ m.Return(mult);
+ Stream s = m.Build();
+ if (m_param.lea_expected) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
+ EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
+ unsigned input_count = InputCountForLea(s[0]->addressing_mode());
+ ASSERT_EQ(input_count, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE,
+ s[0]->InputAt(input_count - 1)->kind());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
+ } else {
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
+ EXPECT_EQ(kIA32Add, s[1]->arch_opcode());
+ }
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
+ ::testing::ValuesIn(kMultParams));
+
+
+TEST_F(InstructionSelectorTest, Int32MulHigh) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Int32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32ImulHigh, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), eax));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), edx));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
new file mode 100644
index 0000000000..c79a9e4eaa
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -0,0 +1,589 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+#include "src/compiler/graph-inl.h"
+#include "src/flags.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+} // namespace
+
+
+InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
+
+
+InstructionSelectorTest::~InstructionSelectorTest() {}
+
+
+InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
+ InstructionSelector::Features features,
+ InstructionSelectorTest::StreamBuilderMode mode) {
+ Schedule* schedule = Export();
+ if (FLAG_trace_turbo) {
+ OFStream out(stdout);
+ out << "=== Schedule before instruction selection ===" << std::endl
+ << *schedule;
+ }
+ EXPECT_NE(0, graph()->NodeCount());
+ int initial_node_count = graph()->NodeCount();
+ Linkage linkage(test_->zone(), call_descriptor());
+ InstructionBlocks* instruction_blocks =
+ InstructionSequence::InstructionBlocksFor(test_->zone(), schedule);
+ InstructionSequence sequence(test_->zone(), instruction_blocks);
+ SourcePositionTable source_position_table(graph());
+ InstructionSelector selector(test_->zone(), graph(), &linkage, &sequence,
+ schedule, &source_position_table, features);
+ selector.SelectInstructions();
+ if (FLAG_trace_turbo) {
+ OFStream out(stdout);
+ PrintableInstructionSequence printable = {
+ RegisterConfiguration::ArchDefault(), &sequence};
+ out << "=== Code sequence after instruction selection ===" << std::endl
+ << printable;
+ }
+ Stream s;
+ // Map virtual registers.
+ {
+ const NodeToVregMap& node_map = selector.GetNodeMapForTesting();
+ for (int i = 0; i < initial_node_count; ++i) {
+ if (node_map[i] != InstructionSelector::kNodeUnmapped) {
+ s.virtual_registers_.insert(std::make_pair(i, node_map[i]));
+ }
+ }
+ }
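+  // Walk the generated sequence, filter it according to |mode|, and record
+  // the constants, immediates and virtual registers referenced by every
+  // instruction that is kept.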
+ std::set<int> virtual_registers;
+ for (InstructionSequence::const_iterator i = sequence.begin();
+ i != sequence.end(); ++i) {
+ Instruction* instr = *i;
+ if (instr->opcode() < 0) continue;
+ if (mode == kTargetInstructions) {
+ switch (instr->arch_opcode()) {
+#define CASE(Name) \
+ case k##Name: \
+ break;
+ TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ default:
+ continue;
+ }
+ }
+ if (mode == kAllExceptNopInstructions && instr->arch_opcode() == kArchNop) {
+ continue;
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i) {
+ InstructionOperand* output = instr->OutputAt(i);
+ EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
+ if (output->IsConstant()) {
+ s.constants_.insert(std::make_pair(
+ output->index(), sequence.GetConstant(output->index())));
+ virtual_registers.insert(output->index());
+ } else if (output->IsUnallocated()) {
+ virtual_registers.insert(
+ UnallocatedOperand::cast(output)->virtual_register());
+ }
+ }
+ for (size_t i = 0; i < instr->InputCount(); ++i) {
+ InstructionOperand* input = instr->InputAt(i);
+ EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
+ if (input->IsImmediate()) {
+ s.immediates_.insert(std::make_pair(
+ input->index(), sequence.GetImmediate(input->index())));
+ } else if (input->IsUnallocated()) {
+ virtual_registers.insert(
+ UnallocatedOperand::cast(input)->virtual_register());
+ }
+ }
+ s.instructions_.push_back(instr);
+ }
+ for (std::set<int>::const_iterator i = virtual_registers.begin();
+ i != virtual_registers.end(); ++i) {
+ int virtual_register = *i;
+ if (sequence.IsDouble(virtual_register)) {
+ EXPECT_FALSE(sequence.IsReference(virtual_register));
+ s.doubles_.insert(virtual_register);
+ }
+ if (sequence.IsReference(virtual_register)) {
+ EXPECT_FALSE(sequence.IsDouble(virtual_register));
+ s.references_.insert(virtual_register);
+ }
+ }
+ for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
+ s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
+ InstructionSequence::StateId::FromInt(i)));
+ }
+ return s;
+}
+
+
+int InstructionSelectorTest::Stream::ToVreg(const Node* node) const {
+ VirtualRegisters::const_iterator i = virtual_registers_.find(node->id());
+ CHECK(i != virtual_registers_.end());
+ return i->second;
+}
+
+
+bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand,
+ Register reg) const {
+ if (!operand->IsUnallocated()) return false;
+ const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
+ if (!unallocated->HasFixedRegisterPolicy()) return false;
+ const int index = Register::ToAllocationIndex(reg);
+ return unallocated->fixed_register_index() == index;
+}
+
+
+bool InstructionSelectorTest::Stream::IsSameAsFirst(
+ const InstructionOperand* operand) const {
+ if (!operand->IsUnallocated()) return false;
+ const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
+ return unallocated->HasSameAsInputPolicy();
+}
+
+
+bool InstructionSelectorTest::Stream::IsUsedAtStart(
+ const InstructionOperand* operand) const {
+ if (!operand->IsUnallocated()) return false;
+ const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
+ return unallocated->IsUsedAtStart();
+}
+
+
+// -----------------------------------------------------------------------------
+// Return.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
+ const float kValue = 4.2f;
+ StreamBuilder m(this, kMachFloat32);
+ m.Return(m.Float32Constant(kValue));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+ EXPECT_FLOAT_EQ(kValue, s.ToFloat32(s[0]->OutputAt(0)));
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Parameter(0));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
+ StreamBuilder m(this, kMachInt32);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachFloat64);
+ m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArchRet, s[2]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Parameters.
+
+
+TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* param = m.Parameter(0);
+ m.Return(param);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_TRUE(s.IsDouble(param));
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ Node* param = m.Parameter(0);
+ m.Return(param);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_TRUE(s.IsReference(param));
+}
+
+
+// -----------------------------------------------------------------------------
+// Finish.
+
+
+TARGET_TEST_F(InstructionSelectorTest, Finish) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ Node* param = m.Parameter(0);
+ Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
+ m.Return(finish);
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_TRUE(s[0]->Output()->IsUnallocated());
+ EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kArchNop, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->InputCount());
+ ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[1]->InputAt(0)));
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_TRUE(s[1]->Output()->IsUnallocated());
+ EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(finish), s.ToVreg(s[1]->Output()));
+ EXPECT_TRUE(s.IsReference(finish));
+}
+
+
+// -----------------------------------------------------------------------------
+// Phi.
+
+
+typedef InstructionSelectorTestWithParam<MachineType>
+ InstructionSelectorPhiTest;
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
+ const MachineType type = GetParam();
+ StreamBuilder m(this, type, type, type);
+ Node* param0 = m.Parameter(0);
+ Node* param1 = m.Parameter(1);
+ MLabel a, b, c;
+ m.Branch(m.Int32Constant(0), &a, &b);
+ m.Bind(&a);
+ m.Goto(&c);
+ m.Bind(&b);
+ m.Goto(&c);
+ m.Bind(&c);
+ Node* phi = m.Phi(type, param0, param1);
+ m.Return(phi);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param0));
+ EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param1));
+}
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
+ const MachineType type = GetParam();
+ StreamBuilder m(this, type, type, type);
+ Node* param0 = m.Parameter(0);
+ Node* param1 = m.Parameter(1);
+ MLabel a, b, c;
+ m.Branch(m.Int32Constant(1), &a, &b);
+ m.Bind(&a);
+ m.Goto(&c);
+ m.Bind(&b);
+ m.Goto(&c);
+ m.Bind(&c);
+ Node* phi = m.Phi(type, param0, param1);
+ m.Return(phi);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_EQ(s.IsReference(phi), s.IsReference(param0));
+ EXPECT_EQ(s.IsReference(phi), s.IsReference(param1));
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
+ ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
+ kMachInt16, kMachUint16, kMachInt32,
+ kMachUint32, kMachInt64, kMachUint64,
+ kMachPtr, kMachAnyTagged));
+
+
+// -----------------------------------------------------------------------------
+// ValueEffect.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
+ StreamBuilder m1(this, kMachInt32, kMachPtr);
+ Node* p1 = m1.Parameter(0);
+ m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
+ Stream s1 = m1.Build(kAllInstructions);
+ StreamBuilder m2(this, kMachInt32, kMachPtr);
+ Node* p2 = m2.Parameter(0);
+ m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
+ m2.NewNode(m2.common()->ValueEffect(1), p2)));
+ Stream s2 = m2.Build(kAllInstructions);
+ EXPECT_LE(3U, s1.size());
+ ASSERT_EQ(s1.size(), s2.size());
+ TRACED_FORRANGE(size_t, i, 0, s1.size() - 1) {
+ const Instruction* i1 = s1[i];
+ const Instruction* i2 = s2[i];
+ EXPECT_EQ(i1->arch_opcode(), i2->arch_opcode());
+ EXPECT_EQ(i1->InputCount(), i2->InputCount());
+ EXPECT_EQ(i1->OutputCount(), i2->OutputCount());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Calls with deoptimization.
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged);
+
+ BailoutId bailout_id(42);
+
+ Node* function_node = m.Parameter(0);
+ Node* receiver = m.Parameter(1);
+ Node* context = m.Parameter(2);
+
+ Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
+ Node* locals = m.NewNode(m.common()->StateValues(0));
+ Node* stack = m.NewNode(m.common()->StateValues(0));
+ Node* context_dummy = m.Int32Constant(0);
+
+ Node* state_node = m.NewNode(
+ m.common()->FrameState(JS_FRAME, bailout_id,
+ OutputFrameStateCombine::Push()),
+ parameters, locals, stack, context_dummy, m.UndefinedConstant());
+ Node* call = m.CallJS0(function_node, receiver, context, state_node);
+ m.Return(call);
+
+ Stream s = m.Build(kAllExceptNopInstructions);
+
+ // Skip until kArchCallJSFunction.
+ size_t index = 0;
+ for (; index < s.size() && s[index]->arch_opcode() != kArchCallJSFunction;
+ index++) {
+ }
+ // Now we should have two instructions: call and return.
+ ASSERT_EQ(index + 2, s.size());
+
+ EXPECT_EQ(kArchCallJSFunction, s[index++]->arch_opcode());
+ EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+ // TODO(jarin) Check deoptimization table.
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged);
+
+ BailoutId bailout_id_before(42);
+
+ // Some arguments for the call node.
+ Node* function_node = m.Parameter(0);
+ Node* receiver = m.Parameter(1);
+ Node* context = m.Int32Constant(1); // Context is ignored.
+
+ // Build frame state for the state before the call.
+ Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+ Node* locals = m.NewNode(m.common()->StateValues(1), m.Float64Constant(0.5));
+ Node* stack = m.NewNode(m.common()->StateValues(1), m.UndefinedConstant());
+
+ Node* context_sentinel = m.Int32Constant(0);
+ Node* frame_state_before = m.NewNode(
+ m.common()->FrameState(JS_FRAME, bailout_id_before,
+ OutputFrameStateCombine::Push()),
+ parameters, locals, stack, context_sentinel, m.UndefinedConstant());
+
+ // Build the call.
+ Node* call = m.CallFunctionStub0(function_node, receiver, context,
+ frame_state_before, CALL_AS_METHOD);
+
+ m.Return(call);
+
+ Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallCodeObject.
+ size_t index = 0;
+ for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+ index++) {
+ }
+  // Now we should have two instructions: call and return.
+ ASSERT_EQ(index + 2, s.size());
+
+ // Check the call instruction
+ const Instruction* call_instr = s[index++];
+ EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+ size_t num_operands =
+ 1 + // Code object.
+      1 +  // Frame state deopt id.
+      4 +  // One input for each value in the frame state.
+ 1 + // Function.
+ 1; // Context.
+ ASSERT_EQ(num_operands, call_instr->InputCount());
+
+ // Code object.
+ EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+ // Deoptimization id.
+ int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+ FrameStateDescriptor* desc_before =
+ s.GetFrameStateDescriptor(deopt_id_before);
+ EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+ EXPECT_EQ(OutputFrameStateCombine::kPushOutput,
+ desc_before->state_combine().kind());
+ EXPECT_EQ(1u, desc_before->parameters_count());
+ EXPECT_EQ(1u, desc_before->locals_count());
+ EXPECT_EQ(1u, desc_before->stack_count());
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(2)));
+ EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(3))); // This should be a context.
+ // We inserted 0 here.
+ EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(4)));
+ EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(5))->IsUndefined());
+ EXPECT_EQ(kMachInt32, desc_before->GetType(0));
+ EXPECT_EQ(kMachAnyTagged, desc_before->GetType(1)); // context is always
+ // tagged/any.
+ EXPECT_EQ(kMachFloat64, desc_before->GetType(2));
+ EXPECT_EQ(kMachAnyTagged, desc_before->GetType(3));
+
+ // Function.
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(6)));
+ // Context.
+ EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(7)));
+
+ EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+ EXPECT_EQ(index, s.size());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest,
+ CallFunctionStubDeoptRecursiveFrameState) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged);
+
+ BailoutId bailout_id_before(42);
+ BailoutId bailout_id_parent(62);
+
+ // Some arguments for the call node.
+ Node* function_node = m.Parameter(0);
+ Node* receiver = m.Parameter(1);
+ Node* context = m.Int32Constant(66);
+
+ // Build frame state for the state before the call.
+ Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
+ Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
+ Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
+ Node* frame_state_parent =
+ m.NewNode(m.common()->FrameState(JS_FRAME, bailout_id_parent,
+ OutputFrameStateCombine::Ignore()),
+ parameters, locals, stack, context, m.UndefinedConstant());
+
+ Node* context2 = m.Int32Constant(46);
+ Node* parameters2 =
+ m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+ Node* locals2 =
+ m.NewNode(m.common()->StateValues(1), m.Float64Constant(0.25));
+ Node* stack2 = m.NewNode(m.common()->StateValues(2), m.Int32Constant(44),
+ m.Int32Constant(45));
+ Node* frame_state_before =
+ m.NewNode(m.common()->FrameState(JS_FRAME, bailout_id_before,
+ OutputFrameStateCombine::Push()),
+ parameters2, locals2, stack2, context2, frame_state_parent);
+
+ // Build the call.
+ Node* call = m.CallFunctionStub0(function_node, receiver, context2,
+ frame_state_before, CALL_AS_METHOD);
+
+ m.Return(call);
+
+ Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallCodeObject.
+ size_t index = 0;
+ for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+ index++) {
+ }
+  // Now we should have two instructions: call and return.
+ EXPECT_EQ(index + 2, s.size());
+
+ // Check the call instruction
+ const Instruction* call_instr = s[index++];
+ EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+ size_t num_operands =
+ 1 + // Code object.
+ 1 + // Frame state deopt id
+ 5 + // One input for each value in frame state + context.
+ 4 + // One input for each value in the parent frame state + context.
+ 1 + // Function.
+ 1; // Context.
+ EXPECT_EQ(num_operands, call_instr->InputCount());
+ // Code object.
+ EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+ // Deoptimization id.
+ int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+ FrameStateDescriptor* desc_before =
+ s.GetFrameStateDescriptor(deopt_id_before);
+ FrameStateDescriptor* desc_before_outer = desc_before->outer_state();
+ EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+ EXPECT_EQ(1u, desc_before_outer->parameters_count());
+ EXPECT_EQ(1u, desc_before_outer->locals_count());
+ EXPECT_EQ(1u, desc_before_outer->stack_count());
+ // Values from parent environment.
+ EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(2)));
+ EXPECT_EQ(kMachInt32, desc_before_outer->GetType(0));
+ // Context:
+ EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(3)));
+ EXPECT_EQ(kMachAnyTagged, desc_before_outer->GetType(1));
+ EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(4)));
+ EXPECT_EQ(kMachInt32, desc_before_outer->GetType(2));
+ EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(5)));
+ EXPECT_EQ(kMachInt32, desc_before_outer->GetType(3));
+ // Values from the nested frame.
+ EXPECT_EQ(1u, desc_before->parameters_count());
+ EXPECT_EQ(1u, desc_before->locals_count());
+ EXPECT_EQ(2u, desc_before->stack_count());
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(6)));
+ EXPECT_EQ(kMachInt32, desc_before->GetType(0));
+ EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(7)));
+ EXPECT_EQ(kMachAnyTagged, desc_before->GetType(1));
+ EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(8)));
+ EXPECT_EQ(kMachFloat64, desc_before->GetType(2));
+ EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(9)));
+ EXPECT_EQ(kMachInt32, desc_before->GetType(3));
+ EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(10)));
+ EXPECT_EQ(kMachInt32, desc_before->GetType(4));
+
+ // Function.
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(11)));
+ // Context.
+ EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(12)));
+ // Continuation.
+
+ EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+ EXPECT_EQ(index, s.size());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
new file mode 100644
index 0000000000..073af15070
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -0,0 +1,241 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/macro-assembler.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionSelectorTest : public TestWithContext, public TestWithZone {
+ public:
+ InstructionSelectorTest();
+ virtual ~InstructionSelectorTest();
+
+ base::RandomNumberGenerator* rng() { return &rng_; }
+
+ class Stream;
+
+ enum StreamBuilderMode {
+ kAllInstructions,
+ kTargetInstructions,
+ kAllExceptNopInstructions
+ };
+
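+  // StreamBuilder assembles a graph through RawMachineAssembler; Build() then
+  // runs the instruction selector over it and returns the resulting Stream.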
+ class StreamBuilder FINAL : public RawMachineAssembler {
+ public:
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
+ : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type)),
+ test_(test) {}
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+ MachineType parameter0_type)
+ : RawMachineAssembler(
+ new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type, parameter0_type)),
+ test_(test) {}
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+ MachineType parameter0_type, MachineType parameter1_type)
+ : RawMachineAssembler(
+ new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type, parameter0_type,
+ parameter1_type)),
+ test_(test) {}
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+ MachineType parameter0_type, MachineType parameter1_type,
+ MachineType parameter2_type)
+ : RawMachineAssembler(
+ new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type, parameter0_type,
+ parameter1_type, parameter2_type)),
+ test_(test) {}
+
+ Stream Build(CpuFeature feature) {
+ return Build(InstructionSelector::Features(feature));
+ }
+ Stream Build(CpuFeature feature1, CpuFeature feature2) {
+ return Build(InstructionSelector::Features(feature1, feature2));
+ }
+ Stream Build(StreamBuilderMode mode = kTargetInstructions) {
+ return Build(InstructionSelector::Features(), mode);
+ }
+ Stream Build(InstructionSelector::Features features,
+ StreamBuilderMode mode = kTargetInstructions);
+
+ private:
+ MachineSignature* MakeMachineSignature(Zone* zone,
+ MachineType return_type) {
+ MachineSignature::Builder builder(zone, 1, 0);
+ builder.AddReturn(return_type);
+ return builder.Build();
+ }
+
+ MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+ MachineType parameter0_type) {
+ MachineSignature::Builder builder(zone, 1, 1);
+ builder.AddReturn(return_type);
+ builder.AddParam(parameter0_type);
+ return builder.Build();
+ }
+
+ MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type) {
+ MachineSignature::Builder builder(zone, 1, 2);
+ builder.AddReturn(return_type);
+ builder.AddParam(parameter0_type);
+ builder.AddParam(parameter1_type);
+ return builder.Build();
+ }
+
+ MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type,
+ MachineType parameter2_type) {
+ MachineSignature::Builder builder(zone, 1, 3);
+ builder.AddReturn(return_type);
+ builder.AddParam(parameter0_type);
+ builder.AddParam(parameter1_type);
+ builder.AddParam(parameter2_type);
+ return builder.Build();
+ }
+
+ private:
+ InstructionSelectorTest* test_;
+ };
+
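+  // Stream is a queryable view of the selected instructions together with the
+  // constants, immediates, virtual registers and frame state descriptors they
+  // reference.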
+ class Stream FINAL {
+ public:
+ size_t size() const { return instructions_.size(); }
+ const Instruction* operator[](size_t index) const {
+ EXPECT_LT(index, size());
+ return instructions_[index];
+ }
+
+ bool IsDouble(const InstructionOperand* operand) const {
+ return IsDouble(ToVreg(operand));
+ }
+
+ bool IsDouble(const Node* node) const { return IsDouble(ToVreg(node)); }
+
+ bool IsInteger(const InstructionOperand* operand) const {
+ return IsInteger(ToVreg(operand));
+ }
+
+ bool IsInteger(const Node* node) const { return IsInteger(ToVreg(node)); }
+
+ bool IsReference(const InstructionOperand* operand) const {
+ return IsReference(ToVreg(operand));
+ }
+
+ bool IsReference(const Node* node) const {
+ return IsReference(ToVreg(node));
+ }
+
+ float ToFloat32(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToFloat32();
+ }
+
+ double ToFloat64(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToFloat64();
+ }
+
+ int32_t ToInt32(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToInt32();
+ }
+
+ int64_t ToInt64(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToInt64();
+ }
+
+ Handle<HeapObject> ToHeapObject(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToHeapObject();
+ }
+
+ int ToVreg(const InstructionOperand* operand) const {
+ if (operand->IsConstant()) return operand->index();
+ EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
+ return UnallocatedOperand::cast(operand)->virtual_register();
+ }
+
+ int ToVreg(const Node* node) const;
+
+ bool IsFixed(const InstructionOperand* operand, Register reg) const;
+ bool IsSameAsFirst(const InstructionOperand* operand) const;
+ bool IsUsedAtStart(const InstructionOperand* operand) const;
+
+ FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
+ EXPECT_LT(deoptimization_id, GetFrameStateDescriptorCount());
+ return deoptimization_entries_[deoptimization_id];
+ }
+
+ int GetFrameStateDescriptorCount() {
+ return static_cast<int>(deoptimization_entries_.size());
+ }
+
+ private:
+ bool IsDouble(int virtual_register) const {
+ return doubles_.find(virtual_register) != doubles_.end();
+ }
+
+ bool IsInteger(int virtual_register) const {
+ return !IsDouble(virtual_register) && !IsReference(virtual_register);
+ }
+
+ bool IsReference(int virtual_register) const {
+ return references_.find(virtual_register) != references_.end();
+ }
+
+ Constant ToConstant(const InstructionOperand* operand) const {
+ ConstantMap::const_iterator i;
+ if (operand->IsConstant()) {
+ i = constants_.find(operand->index());
+ EXPECT_FALSE(constants_.end() == i);
+ } else {
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+ i = immediates_.find(operand->index());
+ EXPECT_FALSE(immediates_.end() == i);
+ }
+ EXPECT_EQ(operand->index(), i->first);
+ return i->second;
+ }
+
+ friend class StreamBuilder;
+
+ typedef std::map<int, Constant> ConstantMap;
+ typedef std::map<NodeId, int> VirtualRegisters;
+
+ ConstantMap constants_;
+ ConstantMap immediates_;
+ std::deque<Instruction*> instructions_;
+ std::set<int> doubles_;
+ std::set<int> references_;
+ VirtualRegisters virtual_registers_;
+ std::deque<FrameStateDescriptor*> deoptimization_entries_;
+ };
+
+ base::RandomNumberGenerator rng_;
+};
+
+
+template <typename T>
+class InstructionSelectorTestWithParam
+ : public InstructionSelectorTest,
+ public ::testing::WithParamInterface<T> {};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
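
For context, tests built on this harness typically construct a small graph through StreamBuilder, run instruction selection via Build(), and then inspect the resulting Stream. A minimal sketch under stated assumptions follows; it is not part of the diff above, the test name is a placeholder, and it assumes the usual RawMachineAssembler helpers (Parameter, Int32Add, Return) plus Instruction::InputCount(), none of which are declared in this header.

TEST_F(InstructionSelectorTest, SketchInt32AddParameters) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());            // exactly one instruction was selected
  EXPECT_EQ(2U, s[0]->InputCount());  // both parameters feed that instruction
}
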
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
new file mode 100644
index 0000000000..0a0d8d66ad
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -0,0 +1,305 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducerTest : public TypedGraphTest {
+ public:
+ JSBuiltinReducerTest() : javascript_(zone()) {}
+
+ protected:
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags) {
+ MachineOperatorBuilder machine(kMachPtr, flags);
+ JSGraph jsgraph(graph(), common(), javascript(), &machine);
+ JSBuiltinReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ Node* Parameter(Type* t, int32_t index = 0) {
+ Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+ NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+ return n;
+ }
+
+ Handle<JSFunction> MathFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked(
+ "Math")).ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ JSObject::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return f;
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+namespace {
+
+// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
+Type* const kNumberTypes[] = {
+ Type::UnsignedSmall(), Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+ Type::OtherUnsigned32(), Type::OtherSigned32(), Type::SignedSmall(),
+ Type::Signed32(), Type::Unsigned32(), Type::Integral32(),
+ Type::MinusZero(), Type::NaN(), Type::OtherNumber(),
+ Type::OrderedNumber(), Type::Number()};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Math.abs
+
+
+TEST_F(JSBuiltinReducerTest, MathAbs) {
+ Handle<JSFunction> f = MathFunction("abs");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Unsigned32())) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ } else {
+ Capture<Node*> branch;
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSelect(kMachNone, IsNumberLessThan(IsNumberConstant(0), p0),
+ p0, IsNumberSubtract(IsNumberConstant(0), p0)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+
+TEST_F(JSBuiltinReducerTest, MathSqrt) {
+ Handle<JSFunction> f = MathFunction("sqrt");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+
+TEST_F(JSBuiltinReducerTest, MathMax0) {
+ Handle<JSFunction> f = MathFunction("max");
+
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant());
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax1) {
+ Handle<JSFunction> f = MathFunction("max");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax2) {
+ Handle<JSFunction> f = MathFunction("max");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(
+ javascript()->CallFunction(4, NO_CALL_FUNCTION_FLAGS), fun,
+ UndefinedConstant(), p0, p1);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSelect(kMachNone, IsNumberLessThan(p1, p0), p1, p0));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+
+TEST_F(JSBuiltinReducerTest, MathImul) {
+ Handle<JSFunction> f = MathFunction("imul");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(
+ javascript()->CallFunction(4, NO_CALL_FUNCTION_FLAGS), fun,
+ UndefinedConstant(), p0, p1);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.fround
+
+
+TEST_F(JSBuiltinReducerTest, MathFround) {
+ Handle<JSFunction> f = MathFunction("fround");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.floor
+
+
+TEST_F(JSBuiltinReducerTest, MathFloorAvailable) {
+ Handle<JSFunction> f = MathFunction("floor");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathFloorUnavailable) {
+ Handle<JSFunction> f = MathFunction("floor");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
+
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.ceil
+
+
+TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
+ Handle<JSFunction> f = MathFunction("ceil");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
+ Handle<JSFunction> f = MathFunction("ceil");
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
+
+ ASSERT_FALSE(r.Changed());
+ }
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
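
The MathImul test above leans on the fact that, once both inputs are known 32-bit integers, Math.imul coincides with ordinary wrapping 32-bit multiplication, which is exactly what the Int32Mul machine operator computes. A standalone reference in plain C++ follows as a sketch; the function name is a placeholder and the snippet is not part of the diff.

#include <cstdint>

// Wrapping 32-bit multiply. Multiplying via uint32_t avoids signed-overflow
// undefined behaviour while producing the same bit pattern as Int32Mul.
static int32_t ImulReference(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) *
                              static_cast<uint32_t>(b));
}
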
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
new file mode 100644
index 0000000000..31d5a034d2
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Shared operators.
+
+namespace {
+
+struct SharedOperator {
+ const Operator* (JSOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ Operator::Properties properties;
+ int value_input_count;
+ int frame_state_input_count;
+ int effect_input_count;
+ int control_input_count;
+ int value_output_count;
+ int effect_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const SharedOperator& sop) {
+ return os << IrOpcode::Mnemonic(sop.opcode);
+}
+
+
+const SharedOperator kSharedOperators[] = {
+#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
+ effect_input_count, control_input_count, value_output_count, \
+ effect_output_count) \
+ { \
+ &JSOperatorBuilder::Name, IrOpcode::kJS##Name, properties, \
+ value_input_count, frame_state_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count \
+ }
+ SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(StrictEqual, Operator::kPure, 2, 0, 0, 0, 1, 0),
+ SHARED(StrictNotEqual, Operator::kPure, 2, 0, 0, 0, 1, 0),
+ SHARED(LessThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(GreaterThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(BitwiseOr, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(BitwiseXor, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(BitwiseAnd, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(ShiftLeft, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(ShiftRight, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(ShiftRightLogical, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(Add, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(Subtract, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(Multiply, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(Divide, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(Modulus, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(UnaryNot, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToBoolean, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToNumber, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToName, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
+ SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(Create, Operator::kEliminatable, 0, 0, 1, 1, 1, 1),
+ SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0),
+ SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
+ SHARED(Debugger, Operator::kNoProperties, 0, 0, 1, 1, 0, 1),
+ SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
+ SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
+ SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
+ SHARED(CreateGlobalContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1)
+#undef SHARED
+};
+
+} // namespace
+
+
+class JSSharedOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<SharedOperator> {};
+
+
+TEST_P(JSSharedOperatorTest, InstancesAreGloballyShared) {
+ const SharedOperator& sop = GetParam();
+ JSOperatorBuilder javascript1(zone());
+ JSOperatorBuilder javascript2(zone());
+ EXPECT_EQ((javascript1.*sop.constructor)(), (javascript2.*sop.constructor)());
+}
+
+
+TEST_P(JSSharedOperatorTest, NumberOfInputsAndOutputs) {
+ JSOperatorBuilder javascript(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)();
+
+ const int context_input_count = 1;
+ // TODO(jarin): Get rid of this hack.
+ const int frame_state_input_count =
+ FLAG_turbo_deoptimization ? sop.frame_state_input_count : 0;
+ EXPECT_EQ(sop.value_input_count, op->ValueInputCount());
+ EXPECT_EQ(context_input_count, OperatorProperties::GetContextInputCount(op));
+ EXPECT_EQ(frame_state_input_count,
+ OperatorProperties::GetFrameStateInputCount(op));
+ EXPECT_EQ(sop.effect_input_count, op->EffectInputCount());
+ EXPECT_EQ(sop.control_input_count, op->ControlInputCount());
+ EXPECT_EQ(sop.value_input_count + context_input_count +
+ frame_state_input_count + sop.effect_input_count +
+ sop.control_input_count,
+ OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
+ EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(JSSharedOperatorTest, OpcodeIsCorrect) {
+ JSOperatorBuilder javascript(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)();
+ EXPECT_EQ(sop.opcode, op->opcode());
+}
+
+
+TEST_P(JSSharedOperatorTest, Properties) {
+ JSOperatorBuilder javascript(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)();
+ EXPECT_EQ(sop.properties, op->properties());
+}
+
+
+INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSSharedOperatorTest,
+ ::testing::ValuesIn(kSharedOperators));
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
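
Each SHARED(...) entry in the table above expands to a brace-initialized SharedOperator, so every row carries the operator constructor, its opcode, its properties, and six input/output counts. Spelling out one expansion makes the columns easier to read; the sketch below is derived from the macro definition above rather than from preprocessor output.

// SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1) expands to:
// {&JSOperatorBuilder::Equal, IrOpcode::kJSEqual, Operator::kNoProperties,
//  2 /* value inputs */, 1 /* frame state inputs */, 1 /* effect inputs */,
//  1 /* control inputs */, 1 /* value outputs */, 1 /* effect outputs */}
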
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
new file mode 100644
index 0000000000..539785d79b
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -0,0 +1,192 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const ExternalArrayType kExternalArrayTypes[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) kExternal##Type##Array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+};
+
+
+const StrictMode kStrictModes[] = {SLOPPY, STRICT};
+
+} // namespace
+
+
+class JSTypedLoweringTest : public TypedGraphTest {
+ public:
+ JSTypedLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
+ virtual ~JSTypedLoweringTest() {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine;
+ JSGraph jsgraph(graph(), common(), javascript(), &machine);
+ JSTypedLowering reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ Node* Parameter(Type* type, int index = 0) {
+ Node* node = graph()->NewNode(common()->Parameter(index), graph()->start());
+ NodeProperties::SetBounds(node, Bounds(Type::None(), type));
+ return node;
+ }
+
+ Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
+ Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
+ Runtime::SetupArrayBuffer(isolate(), buffer, true, bytes, byte_length);
+ return buffer;
+ }
+
+ Matcher<Node*> IsIntPtrConstant(intptr_t value) {
+ return sizeof(value) == 4 ? IsInt32Constant(static_cast<int32_t>(value))
+ : IsInt64Constant(static_cast<int64_t>(value));
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+// -----------------------------------------------------------------------------
+// JSToBoolean
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
+ Node* input = Parameter(Type::String());
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsNumberEqual(
+ IsLoadField(AccessBuilder::ForStringLength(), input,
+ graph()->start(), graph()->start()),
+ IsNumberConstant(0))));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumberAndBoolean) {
+ Node* p0 = Parameter(Type::OrderedNumber(), 0);
+ Node* p1 = Parameter(Type::Boolean(), 1);
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(),
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, control),
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(kMachAnyTagged,
+ IsBooleanNot(IsNumberEqual(p0, IsNumberConstant(0))), p1, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSLoadProperty
+
+
+TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
+ const size_t kLength = 17;
+ double backing_store[kLength];
+ Handle<JSArrayBuffer> buffer =
+ NewArrayBuffer(backing_store, sizeof(backing_store));
+ VectorSlotPair feedback(Handle<TypeFeedbackVector>::null(),
+ FeedbackVectorICSlot::Invalid());
+ TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+
+ Node* key = Parameter(Type::Integral32());
+ Node* base = HeapConstant(array);
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* node = graph()->NewNode(javascript()->LoadProperty(feedback), base,
+ key, context);
+ if (FLAG_turbo_deoptimization) {
+ node->AppendInput(zone(), UndefinedConstant());
+ }
+ node->AppendInput(zone(), effect);
+ node->AppendInput(zone(), control);
+ Reduction r = Reduce(node);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(
+ AccessBuilder::ForTypedArrayElement(type, true),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ key, IsNumberConstant(array->length()->Number()), effect));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// JSStoreProperty
+
+
+TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
+ const size_t kLength = 17;
+ double backing_store[kLength];
+ Handle<JSArrayBuffer> buffer =
+ NewArrayBuffer(backing_store, sizeof(backing_store));
+ TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+ TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+
+ Node* key = Parameter(Type::Integral32());
+ Node* base = HeapConstant(array);
+ Node* value = Parameter(Type::Any());
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+ base, key, value, context);
+ if (FLAG_turbo_deoptimization) {
+ node->AppendInput(zone(), UndefinedConstant());
+ }
+ node->AppendInput(zone(), effect);
+ node->AppendInput(zone(), control);
+ Reduction r = Reduce(node);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStoreElement(
+ AccessBuilder::ForTypedArrayElement(type, true),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ key, IsNumberConstant(array->length()->Number()), value,
+ effect, control));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
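
JSToBooleanWithString above expects ToBoolean of a String input to be lowered to a test of the loaded string length, that is, BooleanNot(NumberEqual(length, 0)). The scalar equivalent is sketched below with a placeholder name; it simply states that a string is truthy exactly when it is non-empty.

#include <cstddef>

static bool StringToBooleanReference(size_t string_length) {
  // Mirrors the lowered graph: "not (length == 0)".
  return !(string_length == 0);
}
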
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
new file mode 100644
index 0000000000..a62216dfcd
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -0,0 +1,1227 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MachineOperatorReducerTest : public TypedGraphTest {
+ public:
+ explicit MachineOperatorReducerTest(int num_parameters = 2)
+ : TypedGraphTest(num_parameters) {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(graph(), common(), &javascript, &machine_);
+ MachineOperatorReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ Matcher<Node*> IsTruncatingDiv(const Matcher<Node*>& dividend_matcher,
+ const int32_t divisor) {
+ base::MagicNumbersForDivision<uint32_t> const mag =
+ base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+ int32_t const multiplier = bit_cast<int32_t>(mag.multiplier);
+ int32_t const shift = bit_cast<int32_t>(mag.shift);
+ Matcher<Node*> quotient_matcher =
+ IsInt32MulHigh(dividend_matcher, IsInt32Constant(multiplier));
+ if (divisor > 0 && multiplier < 0) {
+ quotient_matcher = IsInt32Add(quotient_matcher, dividend_matcher);
+ } else if (divisor < 0 && multiplier > 0) {
+ quotient_matcher = IsInt32Sub(quotient_matcher, dividend_matcher);
+ }
+ if (shift) {
+ quotient_matcher = IsWord32Sar(quotient_matcher, IsInt32Constant(shift));
+ }
+ return IsInt32Add(quotient_matcher,
+ IsWord32Shr(dividend_matcher, IsInt32Constant(31)));
+ }
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+};
+
+
+template <typename T>
+class MachineOperatorReducerTestWithParam
+ : public MachineOperatorReducerTest,
+ public ::testing::WithParamInterface<T> {
+ public:
+ explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
+ : MachineOperatorReducerTest(num_parameters) {}
+ virtual ~MachineOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+const float kFloat32Values[] = {
+ -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+ -1.22813e+35f, -1.20555e+35f, -1.34584e+34f,
+ -1.0079e+32f, -6.49364e+26f, -3.06077e+25f,
+ -1.46821e+25f, -1.17658e+23f, -1.9617e+22f,
+ -2.7357e+20f, -1.48708e+13f, -1.89633e+12f,
+ -4.66622e+11f, -2.22581e+11f, -1.45381e+10f,
+ -1.3956e+09f, -1.32951e+09f, -1.30721e+09f,
+ -1.19756e+09f, -9.26822e+08f, -6.35647e+08f,
+ -4.00037e+08f, -1.81227e+08f, -5.09256e+07f,
+ -964300.0f, -192446.0f, -28455.0f,
+ -27194.0f, -26401.0f, -20575.0f,
+ -17069.0f, -9167.0f, -960.178f,
+ -113.0f, -62.0f, -15.0f,
+ -7.0f, -0.0256635f, -4.60374e-07f,
+ -3.63759e-10f, -4.30175e-14f, -5.27385e-15f,
+ -1.48084e-15f, -1.05755e-19f, -3.2995e-21f,
+ -1.67354e-23f, -1.11885e-23f, -1.78506e-30f,
+ -5.07594e-31f, -3.65799e-31f, -1.43718e-34f,
+ -1.27126e-38f, -0.0f, 0.0f,
+ 1.17549e-38f, 1.56657e-37f, 4.08512e-29f,
+ 3.31357e-28f, 6.25073e-22f, 4.1723e-13f,
+ 1.44343e-09f, 5.27004e-08f, 9.48298e-08f,
+ 5.57888e-07f, 4.89988e-05f, 0.244326f,
+ 12.4895f, 19.0f, 47.0f,
+ 106.0f, 538.324f, 564.536f,
+ 819.124f, 7048.0f, 12611.0f,
+ 19878.0f, 20309.0f, 797056.0f,
+ 1.77219e+09f, 1.51116e+11f, 4.18193e+13f,
+ 3.59167e+16f, 3.38211e+19f, 2.67488e+20f,
+ 1.78831e+21f, 9.20914e+21f, 8.35654e+23f,
+ 1.4495e+24f, 5.94015e+25f, 4.43608e+30f,
+ 2.44502e+33f, 2.61152e+33f, 1.38178e+37f,
+ 1.71306e+37f, 3.31899e+38f, 3.40282e+38f,
+ std::numeric_limits<float>::infinity()};
+
+
+const double kFloat64Values[] = {
+ -V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
+ -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
+ -1.67813e+72, -2.3382e+55, -3.179e+30, -1.441e+09, -1.0647e+09,
+ -7.99361e+08, -5.77375e+08, -2.20984e+08, -32757, -13171,
+ -9970, -3984, -107, -105, -92,
+ -77, -61, -0.000208163, -1.86685e-06, -1.17296e-10,
+ -9.26358e-11, -5.08004e-60, -1.74753e-65, -1.06561e-71, -5.67879e-79,
+ -5.78459e-130, -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
+ -4.40497e-267, -2.19666e-273, -4.9998e-276, -5.59821e-278, -2.03855e-282,
+ -5.99335e-283, -7.17554e-284, -3.11744e-309, -0.0, 0.0,
+ 2.22507e-308, 1.30127e-270, 7.62898e-260, 4.00313e-249, 3.16829e-233,
+ 1.85244e-228, 2.03544e-129, 1.35126e-110, 1.01182e-106, 5.26333e-94,
+ 1.35292e-90, 2.85394e-83, 1.78323e-77, 5.4967e-57, 1.03207e-25,
+ 4.57401e-25, 1.58738e-05, 2, 125, 2310,
+ 9636, 14802, 17168, 28945, 29305,
+ 4.81336e+07, 1.41207e+08, 4.65962e+08, 1.40499e+09, 2.12648e+09,
+ 8.80006e+30, 1.4446e+45, 1.12164e+54, 2.48188e+89, 6.71121e+102,
+ 3.074e+112, 4.9699e+152, 5.58383e+166, 4.30654e+172, 7.08824e+185,
+ 9.6586e+214, 2.028e+223, 6.63277e+243, 1.56192e+261, 1.23202e+269,
+ 5.72883e+289, 8.5798e+290, 1.40256e+294, 1.79769e+308, V8_INFINITY};
+
+
+const int32_t kInt32Values[] = {
+ std::numeric_limits<int32_t>::min(), -1914954528, -1698749618,
+ -1578693386, -1577976073, -1573998034,
+ -1529085059, -1499540537, -1299205097,
+ -1090814845, -938186388, -806828902,
+ -750927650, -520676892, -513661538,
+ -453036354, -433622833, -282638793,
+ -28375, -27788, -22770,
+ -18806, -14173, -11956,
+ -11200, -10212, -8160,
+ -3751, -2758, -1522,
+ -121, -120, -118,
+ -117, -106, -84,
+ -80, -74, -59,
+ -52, -48, -39,
+ -35, -17, -11,
+ -10, -9, -7,
+ -5, 0, 9,
+ 12, 17, 23,
+ 29, 31, 33,
+ 35, 40, 47,
+ 55, 56, 62,
+ 64, 67, 68,
+ 69, 74, 79,
+ 84, 89, 90,
+ 97, 104, 118,
+ 124, 126, 127,
+ 7278, 17787, 24136,
+ 24202, 25570, 26680,
+ 30242, 32399, 420886487,
+ 642166225, 821912648, 822577803,
+ 851385718, 1212241078, 1411419304,
+ 1589626102, 1596437184, 1876245816,
+ 1954730266, 2008792749, 2045320228,
+ std::numeric_limits<int32_t>::max()};
+
+
+const int64_t kInt64Values[] = {
+ std::numeric_limits<int64_t>::min(), V8_INT64_C(-8974392461363618006),
+ V8_INT64_C(-8874367046689588135), V8_INT64_C(-8269197512118230839),
+ V8_INT64_C(-8146091527100606733), V8_INT64_C(-7550917981466150848),
+ V8_INT64_C(-7216590251577894337), V8_INT64_C(-6464086891160048440),
+ V8_INT64_C(-6365616494908257190), V8_INT64_C(-6305630541365849726),
+ V8_INT64_C(-5982222642272245453), V8_INT64_C(-5510103099058504169),
+ V8_INT64_C(-5496838675802432701), V8_INT64_C(-4047626578868642657),
+ V8_INT64_C(-4033755046900164544), V8_INT64_C(-3554299241457877041),
+ V8_INT64_C(-2482258764588614470), V8_INT64_C(-1688515425526875335),
+ V8_INT64_C(-924784137176548532), V8_INT64_C(-725316567157391307),
+ V8_INT64_C(-439022654781092241), V8_INT64_C(-105545757668917080),
+ V8_INT64_C(-2088319373), V8_INT64_C(-2073699916),
+ V8_INT64_C(-1844949911), V8_INT64_C(-1831090548),
+ V8_INT64_C(-1756711933), V8_INT64_C(-1559409497),
+ V8_INT64_C(-1281179700), V8_INT64_C(-1211513985),
+ V8_INT64_C(-1182371520), V8_INT64_C(-785934753),
+ V8_INT64_C(-767480697), V8_INT64_C(-705745662),
+ V8_INT64_C(-514362436), V8_INT64_C(-459916580),
+ V8_INT64_C(-312328082), V8_INT64_C(-302949707),
+ V8_INT64_C(-285499304), V8_INT64_C(-125701262),
+ V8_INT64_C(-95139843), V8_INT64_C(-32768),
+ V8_INT64_C(-27542), V8_INT64_C(-23600),
+ V8_INT64_C(-18582), V8_INT64_C(-17770),
+ V8_INT64_C(-9086), V8_INT64_C(-9010),
+ V8_INT64_C(-8244), V8_INT64_C(-2890),
+ V8_INT64_C(-103), V8_INT64_C(-34),
+ V8_INT64_C(-27), V8_INT64_C(-25),
+ V8_INT64_C(-9), V8_INT64_C(-7),
+ V8_INT64_C(0), V8_INT64_C(2),
+ V8_INT64_C(38), V8_INT64_C(58),
+ V8_INT64_C(65), V8_INT64_C(93),
+ V8_INT64_C(111), V8_INT64_C(1003),
+ V8_INT64_C(1267), V8_INT64_C(12797),
+ V8_INT64_C(23122), V8_INT64_C(28200),
+ V8_INT64_C(30888), V8_INT64_C(42648848),
+ V8_INT64_C(116836693), V8_INT64_C(263003643),
+ V8_INT64_C(571039860), V8_INT64_C(1079398689),
+ V8_INT64_C(1145196402), V8_INT64_C(1184846321),
+ V8_INT64_C(1758281648), V8_INT64_C(1859991374),
+ V8_INT64_C(1960251588), V8_INT64_C(2042443199),
+ V8_INT64_C(296220586027987448), V8_INT64_C(1015494173071134726),
+ V8_INT64_C(1151237951914455318), V8_INT64_C(1331941174616854174),
+ V8_INT64_C(2022020418667972654), V8_INT64_C(2450251424374977035),
+ V8_INT64_C(3668393562685561486), V8_INT64_C(4858229301215502171),
+ V8_INT64_C(4919426235170669383), V8_INT64_C(5034286595330341762),
+ V8_INT64_C(5055797915536941182), V8_INT64_C(6072389716149252074),
+ V8_INT64_C(6185309910199801210), V8_INT64_C(6297328311011094138),
+ V8_INT64_C(6932372858072165827), V8_INT64_C(8483640924987737210),
+ V8_INT64_C(8663764179455849203), V8_INT64_C(8877197042645298254),
+ V8_INT64_C(8901543506779157333), std::numeric_limits<int64_t>::max()};
+
+
+const uint32_t kUint32Values[] = {
+ 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+ const Operator* (MachineOperatorBuilder::*constructor)();
+ const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+ return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+ {&MachineOperatorBuilder::ChangeInt32ToFloat64, "ChangeInt32ToFloat64"},
+ {&MachineOperatorBuilder::ChangeUint32ToFloat64, "ChangeUint32ToFloat64"},
+ {&MachineOperatorBuilder::ChangeFloat64ToInt32, "ChangeFloat64ToInt32"},
+ {&MachineOperatorBuilder::ChangeFloat64ToUint32, "ChangeFloat64ToUint32"},
+ {&MachineOperatorBuilder::ChangeInt32ToInt64, "ChangeInt32ToInt64"},
+ {&MachineOperatorBuilder::ChangeUint32ToUint64, "ChangeUint32ToUint64"},
+ {&MachineOperatorBuilder::TruncateFloat64ToInt32, "TruncateFloat64ToInt32"},
+ {&MachineOperatorBuilder::TruncateInt64ToInt32, "TruncateInt64ToInt32"}};
+
+} // namespace
+
+
+typedef MachineOperatorReducerTestWithParam<UnaryOperator>
+ MachineUnaryOperatorReducerTest;
+
+
+TEST_P(MachineUnaryOperatorReducerTest, Parameter) {
+ const UnaryOperator unop = GetParam();
+ Reduction reduction =
+ Reduce(graph()->NewNode((machine()->*unop.constructor)(), Parameter(0)));
+ EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
+ MachineUnaryOperatorReducerTest,
+ ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat32ToFloat64WithConstant) {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeFloat64ToInt32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt32(), Float64Constant(FastI2D(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(x));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToUint32
+
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeFloat64ToUint32WithChangeUint32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToUint32(),
+ graph()->NewNode(machine()->ChangeUint32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToUint32WithConstant) {
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToUint32(), Float64Constant(FastUI2D(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(bit_cast<int32_t>(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToFloat64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), Int32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastI2D(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToInt64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), Int32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(machine()->ChangeUint32ToFloat64(),
+ Int32Constant(bit_cast<int32_t>(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastUI2D(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToUint64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) {
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ Int32Constant(bit_cast<int32_t>(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest,
+ TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToFloat32(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+ TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithPhi) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Node* const merge = graph()->start();
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(),
+ graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsPhi(kMachInt32, IsTruncateFloat64ToInt32(p0),
+ IsTruncateFloat64ToInt32(p1), merge));
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateInt64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithChangeInt32ToInt64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateInt64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
+ TRACED_FOREACH(int64_t, x, kInt64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->TruncateInt64ToInt32(), Int64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(
+ static_cast<uint32_t>(bit_cast<uint64_t>(x)))));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32And
+
+
+TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ TRACED_FOREACH(int32_t, l, kInt32Values) {
+ if (k == 0 || k == -1 || l == 0 || l == -1) continue;
+
+ // (x & K) & L => x & (K & L)
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32And(), p0, Int32Constant(k)),
+ Int32Constant(l)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+
+ // (K & x) & L => x & (K & L)
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32And(), Int32Constant(k), p0),
+ Int32Constant(l)));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Xor
+
+
+TEST_F(MachineOperatorReducerTest, Word32XorWithWord32XorAndMinusOne) {
+ Node* const p0 = Parameter(0);
+
+ // (x ^ -1) ^ -1 => x
+ Reduction r1 = Reduce(graph()->NewNode(
+ machine()->Word32Xor(),
+ graph()->NewNode(machine()->Word32Xor(), p0, Int32Constant(-1)),
+ Int32Constant(-1)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), p0);
+
+ // -1 ^ (x ^ -1) => x
+ Reduction r2 = Reduce(graph()->NewNode(
+ machine()->Word32Xor(), Int32Constant(-1),
+ graph()->NewNode(machine()->Word32Xor(), p0, Int32Constant(-1))));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), p0);
+
+ // (-1 ^ x) ^ -1 => x
+ Reduction r3 = Reduce(graph()->NewNode(
+ machine()->Word32Xor(),
+ graph()->NewNode(machine()->Word32Xor(), Int32Constant(-1), p0),
+ Int32Constant(-1)));
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_EQ(r3.replacement(), p0);
+
+ // -1 ^ (-1 ^ x) => x
+ Reduction r4 = Reduce(graph()->NewNode(
+ machine()->Word32Xor(), Int32Constant(-1),
+ graph()->NewNode(machine()->Word32Xor(), Int32Constant(-1), p0)));
+ ASSERT_TRUE(r4.Changed());
+ EXPECT_EQ(r4.replacement(), p0);
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Ror
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
+ Node* value = Parameter(0);
+ Node* shift = Parameter(1);
+ Node* shl = graph()->NewNode(machine()->Word32Shl(), value, shift);
+ Node* shr = graph()->NewNode(
+ machine()->Word32Shr(), value,
+ graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift));
+
+ // (x << y) | (x >> (32 - y)) => x ror y
+ Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+ Reduction reduction1 = Reduce(node1);
+ EXPECT_TRUE(reduction1.Changed());
+ EXPECT_EQ(reduction1.replacement(), node1);
+ EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, shift));
+
+ // (x >> (32 - y)) | (x << y) => x ror y
+ Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+ Reduction reduction2 = Reduce(node2);
+ EXPECT_TRUE(reduction2.Changed());
+ EXPECT_EQ(reduction2.replacement(), node2);
+ EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, shift));
+}
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
+ Node* value = Parameter(0);
+ TRACED_FORRANGE(int32_t, k, 0, 31) {
+ Node* shl =
+ graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(k));
+ Node* shr =
+ graph()->NewNode(machine()->Word32Shr(), value, Int32Constant(32 - k));
+
+    // (x << K) | (x >> (32 - K)) => x ror K
+ Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+ Reduction reduction1 = Reduce(node1);
+ EXPECT_TRUE(reduction1.Changed());
+ EXPECT_EQ(reduction1.replacement(), node1);
+ EXPECT_THAT(reduction1.replacement(),
+ IsWord32Ror(value, IsInt32Constant(k)));
+
+ // (x >> (32 - K)) | (x << K) => x ror K
+ Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+ Reduction reduction2 = Reduce(node2);
+ EXPECT_TRUE(reduction2.Changed());
+ EXPECT_EQ(reduction2.replacement(), node2);
+ EXPECT_THAT(reduction2.replacement(),
+ IsWord32Ror(value, IsInt32Constant(k)));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithZeroShift) {
+ Node* value = Parameter(0);
+ Node* node =
+ graph()->NewNode(machine()->Word32Ror(), value, Int32Constant(0));
+ Reduction reduction = Reduce(node);
+ EXPECT_TRUE(reduction.Changed());
+ EXPECT_EQ(reduction.replacement(), value);
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
+ TRACED_FOREACH(int32_t, x, kUint32Values) {
+ TRACED_FORRANGE(int32_t, y, 0, 31) {
+ Node* node = graph()->NewNode(machine()->Word32Ror(), Int32Constant(x),
+ Int32Constant(y));
+ Reduction reduction = Reduce(node);
+ EXPECT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(base::bits::RotateRight32(x, y)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Shl
+
+
+TEST_F(MachineOperatorReducerTest, Word32ShlWithZeroShift) {
+ Node* p0 = Parameter(0);
+ Node* node = graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(0));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Sar) {
+ Node* p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, x, 1, 31) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Sar(), p0, Int32Constant(x)),
+ Int32Constant(x));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ int32_t m = bit_cast<int32_t>(~((1U << x) - 1U));
+ EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
+ Node* p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, x, 1, 31) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Shr(), p0, Int32Constant(x)),
+ Int32Constant(x));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ int32_t m = bit_cast<int32_t>(~((1U << x) - 1U));
+ EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32Div
+
+
+TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(-1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Sub(IsInt32Constant(0), p0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(2), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord32Sar(IsInt32Add(IsWord32Shr(p0, IsInt32Constant(31)), p0),
+ IsInt32Constant(1)));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(-2), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt32Sub(
+ IsInt32Constant(0),
+ IsWord32Sar(IsInt32Add(IsWord32Shr(p0, IsInt32Constant(31)), p0),
+ IsInt32Constant(1))));
+ }
+ TRACED_FORRANGE(int32_t, shift, 2, 30) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Div(), p0,
+ Int32Constant(1 << shift), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord32Sar(IsInt32Add(IsWord32Shr(IsWord32Sar(p0, IsInt32Constant(31)),
+ IsInt32Constant(32 - shift)),
+ p0),
+ IsInt32Constant(shift)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 2, 31) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0,
+ Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt32Sub(
+ IsInt32Constant(0),
+ IsWord32Sar(
+ IsInt32Add(IsWord32Shr(IsWord32Sar(p0, IsInt32Constant(31)),
+ IsInt32Constant(32 - shift)),
+ p0),
+ IsInt32Constant(shift))));
+ }
+ TRACED_FOREACH(int32_t, divisor, kInt32Values) {
+ if (divisor < 0) {
+ if (base::bits::IsPowerOfTwo32(-divisor)) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Sub(IsInt32Constant(0),
+ IsTruncatingDiv(p0, -divisor)));
+ } else if (divisor > 0) {
+ if (base::bits::IsPowerOfTwo32(divisor)) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncatingDiv(p0, divisor));
+ }
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Div(), p0, p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Uint32Div
+
+
+TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Div(), Int32Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Div(), p0, Int32Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Div(), p0, Int32Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ TRACED_FOREACH(uint32_t, dividend, kUint32Values) {
+ TRACED_FOREACH(uint32_t, divisor, kUint32Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Div(), Uint32Constant(dividend),
+ Uint32Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(
+ base::bits::UnsignedDiv32(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint32_t, shift, 1, 31) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Div(), p0,
+ Uint32Constant(1u << shift), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord32Shr(p0, IsInt32Constant(bit_cast<int32_t>(shift))));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Div(), p0, p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32Mod
+
+
+TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), Int32Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(-1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ TRACED_FOREACH(int32_t, dividend, kInt32Values) {
+ TRACED_FOREACH(int32_t, divisor, kInt32Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int32Mod(), Int32Constant(dividend),
+ Int32Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedMod32(dividend, divisor)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 30) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0,
+ Int32Constant(1 << shift), graph()->start()));
+ int32_t const mask = (1 << shift) - 1;
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsSelect(kMachInt32, IsInt32LessThan(p0, IsInt32Constant(0)),
+ IsInt32Sub(IsInt32Constant(0),
+ IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
+ IsInt32Constant(mask))),
+ IsWord32And(p0, IsInt32Constant(mask))));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0,
+ Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift),
+ graph()->start()));
+ int32_t const mask = bit_cast<int32_t, uint32_t>((1U << shift) - 1);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsSelect(kMachInt32, IsInt32LessThan(p0, IsInt32Constant(0)),
+ IsInt32Sub(IsInt32Constant(0),
+ IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
+ IsInt32Constant(mask))),
+ IsWord32And(p0, IsInt32Constant(mask))));
+ }
+ TRACED_FOREACH(int32_t, divisor, kInt32Values) {
+ if (divisor == 0 || base::bits::IsPowerOfTwo32(Abs(divisor))) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Sub(p0, IsInt32Mul(IsTruncatingDiv(p0, Abs(divisor)),
+ IsInt32Constant(Abs(divisor)))));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0, p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// Uint32Mod
+
+
+TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Mod(), p0, Int32Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Mod(), Int32Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Mod(), p0, Int32Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ TRACED_FOREACH(uint32_t, dividend, kUint32Values) {
+ TRACED_FOREACH(uint32_t, divisor, kUint32Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Mod(), Uint32Constant(dividend),
+ Uint32Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(
+ base::bits::UnsignedMod32(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint32_t, shift, 1, 31) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Mod(), p0,
+ Uint32Constant(1u << shift), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord32And(p0, IsInt32Constant(
+ bit_cast<int32_t>((1u << shift) - 1u))));
+ }
+}
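The unsigned counterpart is simpler: the shift loop above expects x % 2^k to collapse to a single bitwise AND. A one-line sketch of the identity being checked (illustrative only, not V8 code):

  // For any uint32_t x and k in [1, 31]: x % (1u << k) == x & ((1u << k) - 1u).
  uint32_t Uint32ModPowerOfTwoSketch(uint32_t x, uint32_t k) {
    return x & ((1u << k) - 1u);
  }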
+
+
+TEST_F(MachineOperatorReducerTest, Uint32ModWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Mod(), p0, p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32AddWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+ Node* p0 = Parameter(0);
+ {
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+ Int32Constant(0), p0);
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+ {
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
+ Int32Constant(0));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ TRACED_FOREACH(int32_t, y, kInt32Values) {
+ int32_t z;
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+ Int32Constant(x), Int32Constant(y));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+ }
+ }
+}
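These expectations assume that base::bits::SignedAddOverflow32 reports whether the 32-bit addition overflowed and stores the wrapped sum through its out parameter, mirroring the two projections of Int32AddWithOverflow. A hedged sketch of those assumed semantics (not the actual V8 helper):

  // Assumed contract: returns true iff lhs + rhs overflows int32_t, and always
  // writes the wrapped two's-complement sum to *val.
  bool SignedAddOverflow32Sketch(int32_t lhs, int32_t rhs, int32_t* val) {
    uint32_t const a = static_cast<uint32_t>(lhs);
    uint32_t const b = static_cast<uint32_t>(rhs);
    uint32_t const sum = a + b;
    *val = static_cast<int32_t>(sum);  // wraps modulo 2^32
    // Overflow iff the operands share a sign and the sum's sign differs.
    return ((~(a ^ b) & (a ^ sum)) >> 31) != 0;
  }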
+
+
+// -----------------------------------------------------------------------------
+// Int32SubWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+ Node* p0 = Parameter(0);
+ Node* add =
+ graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ TRACED_FOREACH(int32_t, y, kInt32Values) {
+ int32_t z;
+ Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
+ Int32Constant(x), Int32Constant(y));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Uint32LessThan
+
+
+TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
+ Node* const p0 = Parameter(0);
+ TRACED_FORRANGE(uint32_t, shift, 1, 3) {
+ const uint32_t limit = (kMaxInt >> shift) - 1;
+ Node* const node = graph()->NewNode(
+ machine()->Uint32LessThan(),
+ graph()->NewNode(machine()->Word32Sar(), p0, Uint32Constant(shift)),
+ Uint32Constant(limit));
+
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsUint32LessThan(
+ p0, IsInt32Constant(bit_cast<int32_t>(limit << shift))));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64Mul
+
+
+TEST_F(MachineOperatorReducerTest, Float64MulWithMinusOne) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Mul(), p0, Float64Constant(-1.0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sub(IsFloat64Constant(-0.0), p0));
+ }
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Mul(), Float64Constant(-1.0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sub(IsFloat64Constant(-0.0), p0));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Store
+
+
+TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
+ const StoreRepresentation rep(kRepWord8, kNoWriteBarrier);
+ Node* const base = Parameter(0);
+ Node* const index = Parameter(1);
+ Node* const value = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Node* const node =
+ graph()->NewNode(machine()->Store(rep), base, index,
+ graph()->NewNode(machine()->Word32And(), value,
+ Uint32Constant(x | 0xffu)),
+ effect, control);
+
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(rep, base, index, value, effect, control));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32SarAndWord32Shl) {
+ const StoreRepresentation rep(kRepWord8, kNoWriteBarrier);
+ Node* const base = Parameter(0);
+ Node* const index = Parameter(1);
+ Node* const value = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FORRANGE(int32_t, x, 1, 24) {
+ Node* const node = graph()->NewNode(
+ machine()->Store(rep), base, index,
+ graph()->NewNode(
+ machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(x)),
+ Int32Constant(x)),
+ effect, control);
+
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(rep, base, index, value, effect, control));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
+ const StoreRepresentation rep(kRepWord16, kNoWriteBarrier);
+ Node* const base = Parameter(0);
+ Node* const index = Parameter(1);
+ Node* const value = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Node* const node =
+ graph()->NewNode(machine()->Store(rep), base, index,
+ graph()->NewNode(machine()->Word32And(), value,
+ Uint32Constant(x | 0xffffu)),
+ effect, control);
+
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(rep, base, index, value, effect, control));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32SarAndWord32Shl) {
+ const StoreRepresentation rep(kRepWord16, kNoWriteBarrier);
+ Node* const base = Parameter(0);
+ Node* const index = Parameter(1);
+ Node* const value = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FORRANGE(int32_t, x, 1, 16) {
+ Node* const node = graph()->NewNode(
+ machine()->Store(rep), base, index,
+ graph()->NewNode(
+ machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(x)),
+ Int32Constant(x)),
+ effect, control);
+
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(rep, base, index, value, effect, control));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
new file mode 100644
index 0000000000..6e41faa38d
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -0,0 +1,314 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#if GTEST_HAS_COMBINE
+
+template <typename T>
+class MachineOperatorTestWithParam
+ : public ::testing::TestWithParam< ::testing::tuple<MachineType, T> > {
+ protected:
+ MachineType type() const { return ::testing::get<0>(B::GetParam()); }
+ const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+ typedef ::testing::TestWithParam< ::testing::tuple<MachineType, T> > B;
+};
+
+
+namespace {
+
+const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
+
+
+const MachineType kMachineTypes[] = {
+ kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
+ kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
+ kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
+ kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Load operator.
+
+
+typedef MachineOperatorTestWithParam<LoadRepresentation>
+ MachineLoadOperatorTest;
+
+
+TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
+ MachineOperatorBuilder machine1(type());
+ MachineOperatorBuilder machine2(type());
+ EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
+}
+
+
+TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
+ MachineOperatorBuilder machine(type());
+ const Operator* op = machine.Load(GetParam());
+
+ EXPECT_EQ(2, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
+}
+
+
+TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(GetParam(),
+ OpParameter<LoadRepresentation>(machine.Load(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
+ ::testing::Combine(::testing::ValuesIn(kMachineReps),
+ ::testing::ValuesIn(kMachineTypes)));
+
+
+// -----------------------------------------------------------------------------
+// Store operator.
+
+
+class MachineStoreOperatorTest
+ : public MachineOperatorTestWithParam<
+ ::testing::tuple<MachineType, WriteBarrierKind> > {
+ protected:
+ StoreRepresentation GetParam() const {
+ return StoreRepresentation(
+ ::testing::get<0>(MachineOperatorTestWithParam<
+ ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
+ ::testing::get<1>(MachineOperatorTestWithParam<
+ ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
+ }
+};
+
+
+TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
+ MachineOperatorBuilder machine1(type());
+ MachineOperatorBuilder machine2(type());
+ EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
+}
+
+
+TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
+ MachineOperatorBuilder machine(type());
+ const Operator* op = machine.Store(GetParam());
+
+ EXPECT_EQ(3, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
+}
+
+
+TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(GetParam(),
+ OpParameter<StoreRepresentation>(machine.Store(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+ MachineOperatorTest, MachineStoreOperatorTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(kMachineReps),
+ ::testing::Combine(::testing::ValuesIn(kMachineTypes),
+ ::testing::Values(kNoWriteBarrier,
+ kFullWriteBarrier))));
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+ const Operator* (MachineOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ int value_input_count;
+ int control_input_count;
+ int value_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+ return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, value_input_count, control_input_count, value_output_count) \
+ { \
+ &MachineOperatorBuilder::Name, IrOpcode::k##Name, value_input_count, \
+ control_input_count, value_output_count \
+ }
+ PURE(Word32And, 2, 0, 1), PURE(Word32Or, 2, 0, 1), PURE(Word32Xor, 2, 0, 1),
+ PURE(Word32Shl, 2, 0, 1), PURE(Word32Shr, 2, 0, 1),
+ PURE(Word32Sar, 2, 0, 1), PURE(Word32Ror, 2, 0, 1),
+ PURE(Word32Equal, 2, 0, 1), PURE(Word64And, 2, 0, 1),
+ PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1), PURE(Word64Shl, 2, 0, 1),
+ PURE(Word64Shr, 2, 0, 1), PURE(Word64Sar, 2, 0, 1),
+ PURE(Word64Ror, 2, 0, 1), PURE(Word64Equal, 2, 0, 1),
+ PURE(Int32Add, 2, 0, 1), PURE(Int32AddWithOverflow, 2, 0, 2),
+ PURE(Int32Sub, 2, 0, 1), PURE(Int32SubWithOverflow, 2, 0, 2),
+ PURE(Int32Mul, 2, 0, 1), PURE(Int32MulHigh, 2, 0, 1),
+ PURE(Int32Div, 2, 1, 1), PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1),
+ PURE(Uint32Mod, 2, 1, 1), PURE(Int32LessThan, 2, 0, 1),
+ PURE(Int32LessThanOrEqual, 2, 0, 1), PURE(Uint32LessThan, 2, 0, 1),
+ PURE(Uint32LessThanOrEqual, 2, 0, 1), PURE(Int64Add, 2, 0, 1),
+ PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1), PURE(Int64Div, 2, 0, 1),
+ PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1), PURE(Uint64Mod, 2, 0, 1),
+ PURE(Int64LessThan, 2, 0, 1), PURE(Int64LessThanOrEqual, 2, 0, 1),
+ PURE(Uint64LessThan, 2, 0, 1), PURE(ChangeFloat32ToFloat64, 1, 0, 1),
+ PURE(ChangeFloat64ToInt32, 1, 0, 1), PURE(ChangeFloat64ToUint32, 1, 0, 1),
+ PURE(ChangeInt32ToInt64, 1, 0, 1), PURE(ChangeUint32ToFloat64, 1, 0, 1),
+ PURE(ChangeUint32ToUint64, 1, 0, 1),
+ PURE(TruncateFloat64ToFloat32, 1, 0, 1),
+ PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
+ PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
+ PURE(Float64Mul, 2, 0, 1), PURE(Float64Div, 2, 0, 1),
+ PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
+ PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
+ PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
+ PURE(Float64Floor, 1, 0, 1), PURE(Float64Ceil, 1, 0, 1),
+ PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1)
+#undef PURE
+};
+
+
+typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
+
+} // namespace
+
+
+TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
+ const PureOperator& pop = GetParam();
+ MachineOperatorBuilder machine1(type());
+ MachineOperatorBuilder machine2(type());
+ EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
+}
+
+
+TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
+ MachineOperatorBuilder machine(type());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (machine.*pop.constructor)();
+
+ EXPECT_EQ(pop.value_input_count, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(pop.control_input_count, op->ControlInputCount());
+ EXPECT_EQ(pop.value_input_count + pop.control_input_count,
+ OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(pop.value_output_count, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(MachinePureOperatorTest, MarkedAsPure) {
+ MachineOperatorBuilder machine(type());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (machine.*pop.constructor)();
+ EXPECT_TRUE(op->HasProperty(Operator::kPure));
+}
+
+
+TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (machine.*pop.constructor)();
+ EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+ MachineOperatorTest, MachinePureOperatorTest,
+ ::testing::Combine(::testing::ValuesIn(kMachineReps),
+ ::testing::ValuesIn(kPureOperators)));
+
+#endif // GTEST_HAS_COMBINE
+
+
+// -----------------------------------------------------------------------------
+// Pseudo operators.
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
+ MachineOperatorBuilder machine(kRepWord32);
+ EXPECT_EQ(machine.Word32And(), machine.WordAnd());
+ EXPECT_EQ(machine.Word32Or(), machine.WordOr());
+ EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
+ EXPECT_EQ(machine.Word32Shl(), machine.WordShl());
+ EXPECT_EQ(machine.Word32Shr(), machine.WordShr());
+ EXPECT_EQ(machine.Word32Sar(), machine.WordSar());
+ EXPECT_EQ(machine.Word32Ror(), machine.WordRor());
+ EXPECT_EQ(machine.Word32Equal(), machine.WordEqual());
+ EXPECT_EQ(machine.Int32Add(), machine.IntAdd());
+ EXPECT_EQ(machine.Int32Sub(), machine.IntSub());
+ EXPECT_EQ(machine.Int32Mul(), machine.IntMul());
+ EXPECT_EQ(machine.Int32Div(), machine.IntDiv());
+ EXPECT_EQ(machine.Uint32Div(), machine.UintDiv());
+ EXPECT_EQ(machine.Int32Mod(), machine.IntMod());
+ EXPECT_EQ(machine.Uint32Mod(), machine.UintMod());
+ EXPECT_EQ(machine.Int32LessThan(), machine.IntLessThan());
+ EXPECT_EQ(machine.Int32LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
+ MachineOperatorBuilder machine(kRepWord64);
+ EXPECT_EQ(machine.Word64And(), machine.WordAnd());
+ EXPECT_EQ(machine.Word64Or(), machine.WordOr());
+ EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
+ EXPECT_EQ(machine.Word64Shl(), machine.WordShl());
+ EXPECT_EQ(machine.Word64Shr(), machine.WordShr());
+ EXPECT_EQ(machine.Word64Sar(), machine.WordSar());
+ EXPECT_EQ(machine.Word64Ror(), machine.WordRor());
+ EXPECT_EQ(machine.Word64Equal(), machine.WordEqual());
+ EXPECT_EQ(machine.Int64Add(), machine.IntAdd());
+ EXPECT_EQ(machine.Int64Sub(), machine.IntSub());
+ EXPECT_EQ(machine.Int64Mul(), machine.IntMul());
+ EXPECT_EQ(machine.Int64Div(), machine.IntDiv());
+ EXPECT_EQ(machine.Uint64Div(), machine.UintDiv());
+ EXPECT_EQ(machine.Int64Mod(), machine.IntMod());
+ EXPECT_EQ(machine.Uint64Mod(), machine.UintMod());
+ EXPECT_EQ(machine.Int64LessThan(), machine.IntLessThan());
+ EXPECT_EQ(machine.Int64LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
new file mode 100644
index 0000000000..0b3a0f5a41
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -0,0 +1,805 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+ return os << mi.constructor_name;
+}
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+// The IntCmp helper structure avoids duplicating code: it pairs a MachInst2
+// (taking two nodes) with an expected_size, because different compare
+// instructions expand to a different number of instructions.
+struct IntCmp {
+ MachInst2 mi;
+ uint32_t expected_size;
+};
+
+struct FPCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
+const FPCmp kFPCmpInstructions[] = {
+ {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
+ kMachFloat64},
+ kUnorderedEqual},
+ {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
+ kMachFloat64},
+ kUnorderedLessThan},
+ {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kMipsCmpD, kMachFloat64},
+ kUnorderedLessThanOrEqual},
+ {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
+ kMachFloat64},
+ kUnorderedLessThan},
+ {{&RawMachineAssembler::Float64GreaterThanOrEqual,
+ "Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
+ kUnorderedLessThanOrEqual}};
+
+struct Conversion {
+ // The machine_type field in MachInst1 represents the destination type.
+ MachInst1 mi;
+ MachineType src_machine_type;
+};
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, kMachInt16},
+ {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, kMachInt16},
+ {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, kMachInt16},
+ {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd, kMachInt32},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, kMachInt32},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kShiftInstructions[] = {
+ {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, kMachInt16},
+ {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, kMachInt16},
+ {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, kMachInt16},
+ {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, kMachInt16},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl, kMachInt32},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr, kMachInt32},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar, kMachInt32},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul, kMachInt32},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv, kMachInt32},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU, kMachUint32},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD, kMachFloat64},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kModInstructions[] = {
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod, kMachInt32},
+    {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMipsModU, kMachInt32},
+ {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic FPU instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kFPArithInstructions[] = {
+ {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD, kMachFloat64},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd, kMachInt32},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub, kMachInt32},
+ {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+ kMipsAddOvf, kMachInt32},
+ {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+ kMipsSubOvf, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, one node.
+// ----------------------------------------------------------------------------
+
+
+const MachInst1 kAddSubOneInstructions[] = {
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub, kMachInt32},
+ // TODO(dusmil): check this ...
+ // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst, kMachInt32}
+};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+const IntCmp kCmpInstructions[] = {
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp, kMachInt16}, 1U},
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp, kMachInt16},
+ 1U},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp, kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMipsCmp,
+ kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMipsCmp,
+ kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kMipsCmp, kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMipsCmp,
+ kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kMipsCmp, kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMipsCmp,
+ kMachUint32},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kMipsCmp, kMachUint32},
+ 1U}};
+
+
+// ----------------------------------------------------------------------------
+// Conversion instructions.
+// ----------------------------------------------------------------------------
+
+const Conversion kConversionInstructions[] = {
+    // Conversion instructions, as described in machine-operator.h:
+ // FPU conversions:
+ // Convert representation of integers between float64 and int32/uint32.
+ // The precise rounding mode and handling of out of range inputs are *not*
+ // defined for these operators, since they are intended only for use with
+ // integers.
+ // mips instruction: cvt_d_w
+ {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+ kMipsCvtDW, kMachFloat64},
+ kMachInt32},
+
+ // mips instruction: cvt_d_uw
+ {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+ kMipsCvtDUw, kMachFloat64},
+ kMachInt32},
+
+ // mips instruction: trunc_w_d
+ {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+ kMipsTruncWD, kMachFloat64},
+ kMachInt32},
+
+ // mips instruction: trunc_uw_d
+ {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+ kMipsTruncUwD, kMachFloat64},
+ kMachInt32}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
+
+
+TEST_P(InstructionSelectorCmpTest, Parameter) {
+ const IntCmp cmp = GetParam();
+ const MachineType type = cmp.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(cmp.expected_size, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+ ::testing::ValuesIn(kCmpInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorMulDivTest;
+
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
+
+
+TEST_P(InstructionSelectorModTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
+ ::testing::ValuesIn(kModInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Floating point instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorFPArithTest;
+
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+ const MachInst2 fpa = GetParam();
+ StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+ m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorIntArithTwoTest;
+
+
+TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
+ const MachInst2 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithTwoTest,
+ ::testing::ValuesIn(kAddSubInstructions));
+
+
+// ----------------------------------------------------------------------------
+// One node.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst1>
+ InstructionSelectorIntArithOneTest;
+
+
+TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
+ const MachInst1 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithOneTest,
+ ::testing::ValuesIn(kAddSubOneInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Conversions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ InstructionSelectorConversionTest;
+
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores.
+// ----------------------------------------------------------------------------
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8, kMipsLb, kMipsSb},
+ {kMachUint8, kMipsLbu, kMipsSb},
+ {kMachInt16, kMipsLh, kMipsSh},
+ {kMachUint16, kMipsLhu, kMipsSh},
+ {kMachInt32, kMipsLw, kMipsSw},
+ {kRepFloat32, kMipsLwc1, kMipsSwc1},
+ {kRepFloat64, kMipsLdc1, kMipsSdc1}};
+
+
+struct MemoryAccessImm {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
+ return os << acc.type;
+}
+
+
+struct MemoryAccessImm1 {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[5];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
+ return os << acc.type;
+}
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores immediate values.
+// ----------------------------------------------------------------------------
+
+
+const MemoryAccessImm kMemoryAccessesImm[] = {
+ {kMachInt8,
+ kMipsLb,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachUint8,
+ kMipsLbu,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachInt16,
+ kMipsLh,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachUint16,
+ kMipsLhu,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachInt32,
+ kMipsLw,
+ kMipsSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachFloat32,
+ kMipsLwc1,
+ kMipsSwc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachFloat64,
+ kMipsLdc1,
+ kMipsSdc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+
+const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
+ {kMachInt8,
+ kMipsLb,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint8,
+ kMipsLbu,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachInt16,
+ kMipsLh,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint16,
+ kMipsLhu,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachInt32,
+ kMipsLw,
+ kMipsSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachFloat32,
+ kMipsLwc1,
+ kMipsSwc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachFloat64,
+ kMipsLdc1,
+ kMipsSdc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+
+// ----------------------------------------------------------------------------
+// Load immediate.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm>
+ InstructionSelectorMemoryAccessImmTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// Store immediate.
+// ----------------------------------------------------------------------------
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImm));
+
+
+// ----------------------------------------------------------------------------
+// Load/store offsets more than 16 bits.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ LoadWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+    // kMipsAdd is the expected opcode because the offset is wider than the
+    // 16-bit immediate field.
+ EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ StoreWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+    // kMipsAdd is the expected opcode because the offset is wider than the
+    // 16-bit immediate field.
+ EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
+
+
+// ----------------------------------------------------------------------------
+// Word32Equal with zero.
+// ----------------------------------------------------------------------------
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
new file mode 100644
index 0000000000..fde7f03c3c
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -0,0 +1,1070 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/node-test-utils.h"
+
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+using testing::_;
+using testing::MakeMatcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+template <typename T>
+bool PrintMatchAndExplain(const T& value, const char* value_name,
+ const Matcher<T>& value_matcher,
+ MatchResultListener* listener) {
+ StringMatchResultListener value_listener;
+ if (!value_matcher.MatchAndExplain(value, &value_listener)) {
+ *listener << "whose " << value_name << " " << value << " doesn't match";
+ if (value_listener.str() != "") {
+ *listener << ", " << value_listener.str();
+ }
+ return false;
+ }
+ return true;
+}
+
+
+class NodeMatcher : public MatcherInterface<Node*> {
+ public:
+ explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ *os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ if (node == NULL) {
+ *listener << "which is NULL";
+ return false;
+ }
+ if (node->opcode() != opcode_) {
+ *listener << "whose opcode is " << IrOpcode::Mnemonic(node->opcode())
+ << " but should have been " << IrOpcode::Mnemonic(opcode_);
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ const IrOpcode::Value opcode_;
+};
+
+
+class IsBranchMatcher FINAL : public NodeMatcher {
+ public:
+ IsBranchMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kBranch),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsMergeMatcher FINAL : public NodeMatcher {
+ public:
+ IsMergeMatcher(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher)
+ : NodeMatcher(IrOpcode::kMerge),
+ control0_matcher_(control0_matcher),
+ control1_matcher_(control1_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose control0 (";
+ control0_matcher_.DescribeTo(os);
+ *os << ") and control1 (";
+ control1_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+ "control0", control0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
+ "control1", control1_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> control0_matcher_;
+ const Matcher<Node*> control1_matcher_;
+};
+
+
+class IsControl1Matcher FINAL : public NodeMatcher {
+ public:
+ IsControl1Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsFinishMatcher FINAL : public NodeMatcher {
+ public:
+ IsFinishMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher)
+ : NodeMatcher(IrOpcode::kFinish),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+};
+
+
+template <typename T>
+class IsConstantMatcher FINAL : public NodeMatcher {
+ public:
+ IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
+ : NodeMatcher(opcode), value_matcher_(value_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
+ listener));
+ }
+
+ private:
+ const Matcher<T> value_matcher_;
+};
+
+
+class IsSelectMatcher FINAL : public NodeMatcher {
+ public:
+ IsSelectMatcher(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher)
+ : NodeMatcher(IrOpcode::kSelect),
+ type_matcher_(type_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ value2_matcher_(value2_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose type (";
+ type_matcher_.DescribeTo(os);
+ *os << "), value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << "), value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << ") and value2 (";
+ value2_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
+ type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value2", value2_matcher_, listener));
+ }
+
+ private:
+ const Matcher<MachineType> type_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> value2_matcher_;
+};
+
+
+class IsPhiMatcher FINAL : public NodeMatcher {
+ public:
+ IsPhiMatcher(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kPhi),
+ type_matcher_(type_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose type (";
+ type_matcher_.DescribeTo(os);
+ *os << "), value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << "), value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
+ type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<MachineType> type_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsEffectPhiMatcher FINAL : public NodeMatcher {
+ public:
+ IsEffectPhiMatcher(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kEffectPhi),
+ effect0_matcher_(effect0_matcher),
+ effect1_matcher_(effect1_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << "), effect0 (";
+ effect0_matcher_.DescribeTo(os);
+ *os << "), effect1 (";
+ effect1_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 0),
+ "effect0", effect0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 1),
+ "effect1", effect1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> effect0_matcher_;
+ const Matcher<Node*> effect1_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsProjectionMatcher FINAL : public NodeMatcher {
+ public:
+ IsProjectionMatcher(const Matcher<size_t>& index_matcher,
+ const Matcher<Node*>& base_matcher)
+ : NodeMatcher(IrOpcode::kProjection),
+ index_matcher_(index_matcher),
+ base_matcher_(base_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose index (";
+ index_matcher_.DescribeTo(os);
+ *os << ") and base (";
+ base_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<size_t>(node), "index",
+ index_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener));
+ }
+
+ private:
+ const Matcher<size_t> index_matcher_;
+ const Matcher<Node*> base_matcher_;
+};
+
+
+class IsCall2Matcher FINAL : public NodeMatcher {
+ public:
+ IsCall2Matcher(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kCall),
+ descriptor_matcher_(descriptor_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << ") and value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
+ "descriptor", descriptor_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<CallDescriptor*> descriptor_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsCall4Matcher FINAL : public NodeMatcher {
+ public:
+ IsCall4Matcher(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kCall),
+ descriptor_matcher_(descriptor_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ value2_matcher_(value2_matcher),
+ value3_matcher_(value3_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << ") and value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << ") and value2 (";
+ value2_matcher_.DescribeTo(os);
+ *os << ") and value3 (";
+ value3_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
+ "descriptor", descriptor_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value2", value2_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+ "value3", value3_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<CallDescriptor*> descriptor_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> value2_matcher_;
+ const Matcher<Node*> value3_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadFieldMatcher FINAL : public NodeMatcher {
+ public:
+ IsLoadFieldMatcher(const Matcher<FieldAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kLoadField),
+ access_matcher_(access_matcher),
+ base_matcher_(base_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose access (";
+ access_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
+ access_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<FieldAccess> access_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadElementMatcher FINAL : public NodeMatcher {
+ public:
+ IsLoadElementMatcher(const Matcher<ElementAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& effect_matcher)
+ : NodeMatcher(IrOpcode::kLoadElement),
+ access_matcher_(access_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher),
+ length_matcher_(length_matcher),
+ effect_matcher_(effect_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose access (";
+ access_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << "), index (";
+ index_matcher_.DescribeTo(os);
+ *os << "), length (";
+ length_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
+ access_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "index", index_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "length", length_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener));
+ }
+
+ private:
+ const Matcher<ElementAccess> access_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+ const Matcher<Node*> length_matcher_;
+ const Matcher<Node*> effect_matcher_;
+};
+
+
+class IsStoreElementMatcher FINAL : public NodeMatcher {
+ public:
+ IsStoreElementMatcher(const Matcher<ElementAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kStoreElement),
+ access_matcher_(access_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher),
+ length_matcher_(length_matcher),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose access (";
+ access_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << "), index (";
+ index_matcher_.DescribeTo(os);
+ *os << "), length (";
+ length_matcher_.DescribeTo(os);
+ *os << "), value (";
+ value_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
+ access_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "index", index_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "length", length_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<ElementAccess> access_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+ const Matcher<Node*> length_matcher_;
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadMatcher FINAL : public NodeMatcher {
+ public:
+ IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kLoad),
+ rep_matcher_(rep_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << "), index (";
+ index_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
+ rep_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "index", index_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<LoadRepresentation> rep_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsStoreMatcher FINAL : public NodeMatcher {
+ public:
+ IsStoreMatcher(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kStore),
+ rep_matcher_(rep_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << "), index (";
+ index_matcher_.DescribeTo(os);
+ *os << "), value (";
+ value_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
+ rep_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "index", index_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<StoreRepresentation> rep_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsBinopMatcher FINAL : public NodeMatcher {
+ public:
+ IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher)
+ : NodeMatcher(opcode),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose lhs (";
+ lhs_matcher_.DescribeTo(os);
+ *os << ") and rhs (";
+ rhs_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+ rhs_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+};
+
+
+class IsUnopMatcher FINAL : public NodeMatcher {
+ public:
+ IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
+ : NodeMatcher(opcode), input_matcher_(input_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose input (";
+ input_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "input", input_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> input_matcher_;
+};
+} // namespace
+
+
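+// Public factory functions. Each one wraps the corresponding matcher class
+// above in a gmock Matcher<Node*>, so tests can write expectations such as
+// (illustrative sketch only):
+//   EXPECT_THAT(node, IsInt32Add(IsInt32Constant(1), IsInt32Constant(2)));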
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsBranchMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher) {
+ return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
+}
+
+
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
+}
+
+
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsControl1Matcher(IrOpcode::kIfFalse, control_matcher));
+}
+
+
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
+ return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
+}
+
+
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher) {
+ return MakeMatcher(new IsFinishMatcher(value_matcher, effect_matcher));
+}
+
+
+Matcher<Node*> IsExternalConstant(
+ const Matcher<ExternalReference>& value_matcher) {
+ return MakeMatcher(new IsConstantMatcher<ExternalReference>(
+ IrOpcode::kExternalConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsHeapConstant(
+ const Matcher<Unique<HeapObject> >& value_matcher) {
+ return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
+ IrOpcode::kHeapConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<int32_t>(IrOpcode::kInt32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<double>(IrOpcode::kNumberConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher) {
+ return MakeMatcher(new IsSelectMatcher(type_matcher, value0_matcher,
+ value1_matcher, value2_matcher));
+}
+
+
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& merge_matcher) {
+ return MakeMatcher(new IsPhiMatcher(type_matcher, value0_matcher,
+ value1_matcher, merge_matcher));
+}
+
+
+Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher,
+ const Matcher<Node*>& merge_matcher) {
+ return MakeMatcher(
+ new IsEffectPhiMatcher(effect0_matcher, effect1_matcher, merge_matcher));
+}
+
+
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+ const Matcher<Node*>& base_matcher) {
+ return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsCall2Matcher(descriptor_matcher, value0_matcher,
+ value1_matcher, effect_matcher,
+ control_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsCall4Matcher(
+ descriptor_matcher, value0_matcher, value1_matcher, value2_matcher,
+ value3_matcher, effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsLoadFieldMatcher(access_matcher, base_matcher,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& effect_matcher) {
+ return MakeMatcher(new IsLoadElementMatcher(access_matcher, base_matcher,
+ index_matcher, length_matcher,
+ effect_matcher));
+}
+
+
+Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsStoreElementMatcher(
+ access_matcher, base_matcher, index_matcher, length_matcher,
+ value_matcher, effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsLoadMatcher(rep_matcher, base_matcher, index_matcher,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsStoreMatcher(rep_matcher, base_matcher,
+ index_matcher, value_matcher,
+ effect_matcher, control_matcher));
+}
+
+
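+// Expands to one factory function per binary operator, e.g.
+// IsInt32Add(lhs_matcher, rhs_matcher), each delegating to IsBinopMatcher
+// with the corresponding IrOpcode.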
+#define IS_BINOP_MATCHER(Name) \
+ Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher, \
+ const Matcher<Node*>& rhs_matcher) { \
+ return MakeMatcher( \
+ new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
+ }
+IS_BINOP_MATCHER(NumberEqual)
+IS_BINOP_MATCHER(NumberLessThan)
+IS_BINOP_MATCHER(NumberSubtract)
+IS_BINOP_MATCHER(NumberMultiply)
+IS_BINOP_MATCHER(Word32And)
+IS_BINOP_MATCHER(Word32Sar)
+IS_BINOP_MATCHER(Word32Shl)
+IS_BINOP_MATCHER(Word32Shr)
+IS_BINOP_MATCHER(Word32Ror)
+IS_BINOP_MATCHER(Word32Equal)
+IS_BINOP_MATCHER(Word64And)
+IS_BINOP_MATCHER(Word64Sar)
+IS_BINOP_MATCHER(Word64Shl)
+IS_BINOP_MATCHER(Word64Equal)
+IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32Add)
+IS_BINOP_MATCHER(Int32Sub)
+IS_BINOP_MATCHER(Int32Mul)
+IS_BINOP_MATCHER(Int32MulHigh)
+IS_BINOP_MATCHER(Int32LessThan)
+IS_BINOP_MATCHER(Uint32LessThan)
+IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Float64Sub)
+#undef IS_BINOP_MATCHER
+
+
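+// Same pattern for unary operators: each expansion yields a factory such as
+// IsChangeInt32ToFloat64(input_matcher), backed by IsUnopMatcher.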
+#define IS_UNOP_MATCHER(Name) \
+ Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) { \
+ return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
+ }
+IS_UNOP_MATCHER(BooleanNot)
+IS_UNOP_MATCHER(ChangeFloat64ToInt32)
+IS_UNOP_MATCHER(ChangeFloat64ToUint32)
+IS_UNOP_MATCHER(ChangeInt32ToFloat64)
+IS_UNOP_MATCHER(ChangeInt32ToInt64)
+IS_UNOP_MATCHER(ChangeUint32ToFloat64)
+IS_UNOP_MATCHER(ChangeUint32ToUint64)
+IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
+IS_UNOP_MATCHER(TruncateFloat64ToInt32)
+IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float64Sqrt)
+IS_UNOP_MATCHER(Float64Floor)
+IS_UNOP_MATCHER(Float64Ceil)
+IS_UNOP_MATCHER(Float64RoundTruncate)
+IS_UNOP_MATCHER(Float64RoundTiesAway)
+#undef IS_UNOP_MATCHER
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
new file mode 100644
index 0000000000..870d55513d
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -0,0 +1,171 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
+#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/machine-type.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class ExternalReference;
+class HeapObject;
+template <class T>
+class Unique;
+
+namespace compiler {
+
+// Forward declarations.
+class CallDescriptor;
+struct ElementAccess;
+struct FieldAccess;
+class Node;
+
+
+using ::testing::Matcher;
+
+
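+// Node matchers for use with EXPECT_THAT/ASSERT_THAT in compiler unit tests.
+// Each function returns a gmock Matcher<Node*> that checks the node's opcode
+// and applies the given sub-matchers to its inputs (and, where applicable,
+// to its operator parameter), for example (illustrative only):
+//   EXPECT_THAT(node, IsWord32And(IsInt32Constant(0xff), ::testing::_));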
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsExternalConstant(
+ const Matcher<ExternalReference>& value_matcher);
+Matcher<Node*> IsHeapConstant(
+ const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher);
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher,
+ const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+ const Matcher<Node*>& base_matcher);
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+
+Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberMultiply(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Shr(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Add(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Sub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32MulHigh(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32LessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint32LessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
new file mode 100644
index 0000000000..dbcdedb798
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -0,0 +1,513 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/register-allocator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef BasicBlock::RpoNumber Rpo;
+
+namespace {
+
+static const char*
+ general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
+static const char*
+ double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
+static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
+ RegisterConfiguration::kMaxDoubleRegisters)];
+
+
+static void InitializeRegisterNames() {
+ char* loc = register_names_;
+ for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
+ general_register_names_[i] = loc;
+ loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
+ *loc++ = 0;
+ }
+ for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
+ double_register_names_[i] = loc;
+ loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
+ *loc++ = 0;
+ }
+}
+
+enum BlockCompletionType { kFallThrough, kBranch, kJump };
+
+struct BlockCompletion {
+ BlockCompletionType type_;
+ int vreg_;
+ int offset_0_;
+ int offset_1_;
+};
+
+static const int kInvalidJumpOffset = kMinInt;
+
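+// Helpers describing how a block ends. Branch and jump targets are given as
+// offsets relative to the current block's position; WireBlocks() in the test
+// fixture below resolves them into successor/predecessor links, so e.g.
+// Jump(-1) targets the immediately preceding block.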
+BlockCompletion FallThrough() {
+ BlockCompletion completion = {kFallThrough, -1, 1, kInvalidJumpOffset};
+ return completion;
+}
+
+
+BlockCompletion Jump(int offset) {
+ BlockCompletion completion = {kJump, -1, offset, kInvalidJumpOffset};
+ return completion;
+}
+
+
+BlockCompletion Branch(int vreg, int left_offset, int right_offset) {
+ BlockCompletion completion = {kBranch, vreg, left_offset, right_offset};
+ return completion;
+}
+
+} // namespace
+
+
+class RegisterAllocatorTest : public TestWithZone {
+ public:
+ static const int kDefaultNRegs = 4;
+
+ RegisterAllocatorTest()
+ : num_general_registers_(kDefaultNRegs),
+ num_double_registers_(kDefaultNRegs),
+ instruction_blocks_(zone()),
+ current_block_(nullptr),
+ is_last_block_(false) {
+ InitializeRegisterNames();
+ }
+
+ void SetNumRegs(int num_general_registers, int num_double_registers) {
+ CHECK(instruction_blocks_.empty());
+ num_general_registers_ = num_general_registers;
+ num_double_registers_ = num_double_registers;
+ }
+
+ RegisterConfiguration* config() {
+ if (config_.is_empty()) {
+ config_.Reset(new RegisterConfiguration(
+ num_general_registers_, num_double_registers_, num_double_registers_,
+ general_register_names_, double_register_names_));
+ }
+ return config_.get();
+ }
+
+ Frame* frame() {
+ if (frame_.is_empty()) {
+ frame_.Reset(new Frame());
+ }
+ return frame_.get();
+ }
+
+ InstructionSequence* sequence() {
+ if (sequence_.is_empty()) {
+ sequence_.Reset(new InstructionSequence(zone(), &instruction_blocks_));
+ }
+ return sequence_.get();
+ }
+
+ RegisterAllocator* allocator() {
+ if (allocator_.is_empty()) {
+ allocator_.Reset(
+ new RegisterAllocator(config(), zone(), frame(), sequence()));
+ }
+ return allocator_.get();
+ }
+
+ void StartLoop(int loop_blocks) {
+ CHECK(current_block_ == nullptr);
+ if (!loop_blocks_.empty()) {
+ CHECK(!loop_blocks_.back().loop_header_.IsValid());
+ }
+ LoopData loop_data = {Rpo::Invalid(), loop_blocks};
+ loop_blocks_.push_back(loop_data);
+ }
+
+ void EndLoop() {
+ CHECK(current_block_ == nullptr);
+ CHECK(!loop_blocks_.empty());
+ CHECK_EQ(0, loop_blocks_.back().expected_blocks_);
+ loop_blocks_.pop_back();
+ }
+
+ void StartLastBlock() {
+ CHECK(!is_last_block_);
+ is_last_block_ = true;
+ NewBlock();
+ }
+
+ void StartBlock() {
+ CHECK(!is_last_block_);
+ NewBlock();
+ }
+
+ void EndBlock(BlockCompletion completion = FallThrough()) {
+ completions_.push_back(completion);
+ switch (completion.type_) {
+ case kFallThrough:
+ if (is_last_block_) break;
+ // TODO(dcarney): we don't emit this after returns.
+ EmitFallThrough();
+ break;
+ case kJump:
+ EmitJump();
+ break;
+ case kBranch:
+ EmitBranch(completion.vreg_);
+ break;
+ }
+ CHECK(current_block_ != nullptr);
+ sequence()->EndBlock(current_block_->rpo_number());
+ current_block_ = nullptr;
+ }
+
+ void Allocate() {
+ CHECK_EQ(nullptr, current_block_);
+ CHECK(is_last_block_);
+ WireBlocks();
+ if (FLAG_trace_alloc || FLAG_trace_turbo) {
+ OFStream os(stdout);
+ PrintableInstructionSequence printable = {config(), sequence()};
+ os << "Before: " << std::endl << printable << std::endl;
+ }
+ allocator()->Allocate();
+ if (FLAG_trace_alloc || FLAG_trace_turbo) {
+ OFStream os(stdout);
+ PrintableInstructionSequence printable = {config(), sequence()};
+ os << "After: " << std::endl << printable << std::endl;
+ }
+ }
+
+ int NewReg() { return sequence()->NextVirtualRegister(); }
+
+ int Parameter() {
+ int vreg = NewReg();
+ InstructionOperand* outputs[1]{UseRegister(vreg)};
+ Emit(kArchNop, 1, outputs);
+ return vreg;
+ }
+
+ Instruction* Return(int vreg) {
+ InstructionOperand* inputs[1]{UseRegister(vreg)};
+ return Emit(kArchRet, 0, nullptr, 1, inputs);
+ }
+
+ PhiInstruction* Phi(int vreg) {
+ PhiInstruction* phi = new (zone()) PhiInstruction(zone(), NewReg());
+ phi->operands().push_back(vreg);
+ current_block_->AddPhi(phi);
+ return phi;
+ }
+
+ int DefineConstant(int32_t imm = 0) {
+ int virtual_register = NewReg();
+ sequence()->AddConstant(virtual_register, Constant(imm));
+ InstructionOperand* outputs[1]{
+ ConstantOperand::Create(virtual_register, zone())};
+ Emit(kArchNop, 1, outputs);
+ return virtual_register;
+ }
+
+ ImmediateOperand* Immediate(int32_t imm = 0) {
+ int index = sequence()->AddImmediate(Constant(imm));
+ return ImmediateOperand::Create(index, zone());
+ }
+
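+ // Instruction emission helpers. The suffix appears to encode the operand
+ // shapes: F = output fixed to the first input (DefineSameAsFirst),
+ // R = register use, U = unconstrained use, I = immediate.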
+ Instruction* EmitFRI(int output_vreg, int input_vreg_0) {
+ InstructionOperand* outputs[1]{DefineSameAsFirst(output_vreg)};
+ InstructionOperand* inputs[2]{UseRegister(input_vreg_0), Immediate()};
+ return Emit(kArchNop, 1, outputs, 2, inputs);
+ }
+
+ Instruction* EmitFRU(int output_vreg, int input_vreg_0, int input_vreg_1) {
+ InstructionOperand* outputs[1]{DefineSameAsFirst(output_vreg)};
+ InstructionOperand* inputs[2]{UseRegister(input_vreg_0), Use(input_vreg_1)};
+ return Emit(kArchNop, 1, outputs, 2, inputs);
+ }
+
+ Instruction* EmitRRR(int output_vreg, int input_vreg_0, int input_vreg_1) {
+ InstructionOperand* outputs[1]{UseRegister(output_vreg)};
+ InstructionOperand* inputs[2]{UseRegister(input_vreg_0),
+ UseRegister(input_vreg_1)};
+ return Emit(kArchNop, 1, outputs, 2, inputs);
+ }
+
+ private:
+ InstructionOperand* Unallocated(int vreg,
+ UnallocatedOperand::ExtendedPolicy policy) {
+ UnallocatedOperand* op = new (zone()) UnallocatedOperand(policy);
+ op->set_virtual_register(vreg);
+ return op;
+ }
+
+ InstructionOperand* Unallocated(int vreg,
+ UnallocatedOperand::ExtendedPolicy policy,
+ UnallocatedOperand::Lifetime lifetime) {
+ UnallocatedOperand* op = new (zone()) UnallocatedOperand(policy, lifetime);
+ op->set_virtual_register(vreg);
+ return op;
+ }
+
+ InstructionOperand* UseRegister(int vreg) {
+ return Unallocated(vreg, UnallocatedOperand::MUST_HAVE_REGISTER);
+ }
+
+ InstructionOperand* DefineSameAsFirst(int vreg) {
+ return Unallocated(vreg, UnallocatedOperand::SAME_AS_FIRST_INPUT);
+ }
+
+ InstructionOperand* Use(int vreg) {
+ return Unallocated(vreg, UnallocatedOperand::NONE,
+ UnallocatedOperand::USED_AT_START);
+ }
+
+ void EmitBranch(int vreg) {
+ InstructionOperand* inputs[4]{UseRegister(vreg), Immediate(), Immediate(),
+ Immediate()};
+ InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
+ FlagsConditionField::encode(kEqual);
+ Instruction* instruction =
+ NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
+ sequence()->AddInstruction(instruction);
+ }
+
+ void EmitFallThrough() {
+ Instruction* instruction =
+ NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
+ sequence()->AddInstruction(instruction);
+ }
+
+ void EmitJump() {
+ InstructionOperand* inputs[1]{Immediate()};
+ Instruction* instruction =
+ NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
+ sequence()->AddInstruction(instruction);
+ }
+
+ Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
+ InstructionOperand** outputs,
+ size_t inputs_size = 0,
+ InstructionOperand** inputs = nullptr,
+ size_t temps_size = 0,
+ InstructionOperand** temps = nullptr) {
+ CHECK_NE(nullptr, current_block_);
+ return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
+ inputs, temps_size, temps);
+ }
+
+ Instruction* Emit(InstructionCode code, size_t outputs_size,
+ InstructionOperand** outputs, size_t inputs_size = 0,
+ InstructionOperand** inputs = nullptr,
+ size_t temps_size = 0,
+ InstructionOperand** temps = nullptr) {
+ Instruction* instruction = NewInstruction(
+ code, outputs_size, outputs, inputs_size, inputs, temps_size, temps);
+ sequence()->AddInstruction(instruction);
+ return instruction;
+ }
+
+ InstructionBlock* NewBlock() {
+ CHECK(current_block_ == nullptr);
+ BasicBlock::Id block_id =
+ BasicBlock::Id::FromSize(instruction_blocks_.size());
+ Rpo rpo = Rpo::FromInt(block_id.ToInt());
+ Rpo loop_header = Rpo::Invalid();
+ Rpo loop_end = Rpo::Invalid();
+ if (!loop_blocks_.empty()) {
+ auto& loop_data = loop_blocks_.back();
+ // This is a loop header.
+ if (!loop_data.loop_header_.IsValid()) {
+ loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
+ loop_data.expected_blocks_--;
+ loop_data.loop_header_ = rpo;
+ } else {
+ // This is a loop body.
+ CHECK_NE(0, loop_data.expected_blocks_);
+ // TODO(dcarney): handle nested loops.
+ loop_data.expected_blocks_--;
+ loop_header = loop_data.loop_header_;
+ }
+ }
+ // Construct instruction block.
+ InstructionBlock* instruction_block = new (zone()) InstructionBlock(
+ zone(), block_id, rpo, rpo, loop_header, loop_end, false);
+ instruction_blocks_.push_back(instruction_block);
+ current_block_ = instruction_block;
+ sequence()->StartBlock(rpo);
+ return instruction_block;
+ }
+
+ void WireBlocks() {
+ CHECK(instruction_blocks_.size() == completions_.size());
+ size_t offset = 0;
+ size_t size = instruction_blocks_.size();
+ for (const auto& completion : completions_) {
+ switch (completion.type_) {
+ case kFallThrough:
+ if (offset == size - 1) break;
+ // Fallthrough.
+ case kJump:
+ WireBlock(offset, completion.offset_0_);
+ break;
+ case kBranch:
+ WireBlock(offset, completion.offset_0_);
+ WireBlock(offset, completion.offset_1_);
+ break;
+ }
+ ++offset;
+ }
+ }
+
+ void WireBlock(size_t block_offset, int jump_offset) {
+ size_t target_block_offset =
+ block_offset + static_cast<size_t>(jump_offset);
+ CHECK(block_offset < instruction_blocks_.size());
+ CHECK(target_block_offset < instruction_blocks_.size());
+ InstructionBlock* block = instruction_blocks_[block_offset];
+ InstructionBlock* target = instruction_blocks_[target_block_offset];
+ block->successors().push_back(target->rpo_number());
+ target->predecessors().push_back(block->rpo_number());
+ }
+
+ struct LoopData {
+ Rpo loop_header_;
+ int expected_blocks_;
+ };
+ typedef std::vector<LoopData> LoopBlocks;
+ typedef std::vector<BlockCompletion> Completions;
+
+ SmartPointer<RegisterConfiguration> config_;
+ SmartPointer<Frame> frame_;
+ SmartPointer<RegisterAllocator> allocator_;
+ SmartPointer<InstructionSequence> sequence_;
+ int num_general_registers_;
+ int num_double_registers_;
+
+ // Block building state.
+ InstructionBlocks instruction_blocks_;
+ Completions completions_;
+ LoopBlocks loop_blocks_;
+ InstructionBlock* current_block_;
+ bool is_last_block_;
+};
+
+
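+// The tests below build an InstructionSequence through the fixture's
+// block-building DSL (StartBlock/EndBlock/Parameter/Return/...), then run
+// the register allocator via Allocate() and inspect the resulting operands.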
+TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
+ StartLastBlock();
+ int a_reg = Parameter();
+ int b_reg = Parameter();
+ int c_reg = NewReg();
+ Instruction* res = EmitRRR(c_reg, a_reg, b_reg);
+ Return(c_reg);
+ EndBlock();
+
+ Allocate();
+
+ ASSERT_TRUE(res->OutputAt(0)->IsRegister());
+}
+
+
+TEST_F(RegisterAllocatorTest, SimpleLoop) {
+ // i = K;
+ // while(true) { i++ }
+
+ StartBlock();
+ int i_reg = DefineConstant();
+ EndBlock();
+
+ {
+ StartLoop(1);
+
+ StartLastBlock();
+ PhiInstruction* phi = Phi(i_reg);
+ int ipp = NewReg();
+ EmitFRU(ipp, phi->virtual_register(), DefineConstant());
+ phi->operands().push_back(ipp);
+ EndBlock(Jump(0));
+
+ EndLoop();
+ }
+
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, SimpleBranch) {
+ // return i ? K1 : K2
+ StartBlock();
+ int i_reg = DefineConstant();
+ EndBlock(Branch(i_reg, 1, 2));
+
+ StartBlock();
+ Return(DefineConstant());
+ EndBlock();
+
+ StartLastBlock();
+ Return(DefineConstant());
+ EndBlock();
+
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
+ const size_t kNumRegs = 3;
+ const size_t kParams = kNumRegs + 1;
+ int parameters[kParams];
+
+ // Override number of registers.
+ SetNumRegs(kNumRegs, kNumRegs);
+
+ // Initial block.
+ StartBlock();
+ int constant = DefineConstant();
+ for (size_t i = 0; i < arraysize(parameters); ++i) {
+ parameters[i] = DefineConstant();
+ }
+ EndBlock();
+
+ PhiInstruction* phis[kParams];
+ {
+ StartLoop(2);
+
+ // Loop header.
+ StartBlock();
+
+ for (size_t i = 0; i < arraysize(parameters); ++i) {
+ phis[i] = Phi(parameters[i]);
+ }
+
+ // Perform some computations.
+ // something like phi[i] += const
+ for (size_t i = 0; i < arraysize(parameters); ++i) {
+ int result = NewReg();
+ EmitFRU(result, phis[i]->virtual_register(), constant);
+ phis[i]->operands().push_back(result);
+ }
+
+ EndBlock(Branch(DefineConstant(), 1, 2));
+
+ // Jump back to loop header.
+ StartBlock();
+ EndBlock(Jump(-1));
+
+ EndLoop();
+ }
+
+ // End block.
+ StartLastBlock();
+
+ // Return sum.
+ Return(DefineConstant());
+ EndBlock();
+
+ Allocate();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/select-lowering-unittest.cc b/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
new file mode 100644
index 0000000000..6dbd7ad73d
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/select-lowering.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SelectLoweringTest : public GraphTest {
+ public:
+ SelectLoweringTest() : GraphTest(5), lowering_(graph(), common()) {}
+
+ protected:
+ Reduction Reduce(Node* node) { return lowering_.Reduce(node); }
+
+ private:
+ SelectLowering lowering_;
+};
+
+
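+// Two Select nodes with the same condition (p0) are lowered against the same
+// control diamond: the second reduction is expected to reuse the Merge node
+// introduced for the first one (captured in 'merge').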
+TEST_F(SelectLoweringTest, SelectWithSameConditions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Node* const p2 = Parameter(2);
+ Node* const p3 = Parameter(3);
+ Node* const p4 = Parameter(4);
+
+ Capture<Node*> branch;
+ Capture<Node*> merge;
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(
+ kMachInt32, p1, p2,
+ AllOf(CaptureEq(&merge),
+ IsMerge(IsIfTrue(CaptureEq(&branch)),
+ IsIfFalse(AllOf(CaptureEq(&branch),
+ IsBranch(p0, graph()->start())))))));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p3, p4));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsPhi(kMachInt32, p3, p4, CaptureEq(&merge)));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
new file mode 100644
index 0000000000..465ee84b4f
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -0,0 +1,603 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/simplified-operator-reducer.h"
+#include "src/conversions.h"
+#include "src/types.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedOperatorReducerTest : public GraphTest {
+ public:
+ explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
+ : GraphTest(num_parameters), simplified_(zone()) {}
+ virtual ~SimplifiedOperatorReducerTest() {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine;
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(graph(), common(), &javascript, &machine);
+ SimplifiedOperatorReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+};
+
+
+template <typename T>
+class SimplifiedOperatorReducerTestWithParam
+ : public SimplifiedOperatorReducerTest,
+ public ::testing::WithParamInterface<T> {
+ public:
+ explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
+ : SimplifiedOperatorReducerTest(num_parameters) {}
+ virtual ~SimplifiedOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const double kFloat64Values[] = {
+ -V8_INFINITY, -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
+ -8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
+ -1.67913e+188, -1.6257e+184, -2.60043e+170, -2.52941e+168, -3.06033e+116,
+ -4.56201e+52, -3.56788e+50, -9.9066e+38, -3.07261e+31, -2.1271e+09,
+ -1.91489e+09, -1.73053e+09, -9.30675e+08, -26030, -20453,
+ -15790, -11699, -111, -97, -78,
+ -63, -58, -1.53858e-06, -2.98914e-12, -1.14741e-39,
+ -8.20347e-57, -1.48932e-59, -3.17692e-66, -8.93103e-81, -3.91337e-83,
+ -6.0489e-92, -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
+ -1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
+ -1.11612e-293, -0.0, 0.0, 2.22507e-308, 1.06526e-307,
+ 4.16643e-227, 6.76624e-223, 2.0432e-197, 3.16254e-184, 1.37315e-173,
+ 2.88603e-172, 1.54155e-99, 4.42923e-81, 1.40539e-73, 5.4462e-73,
+ 1.24064e-58, 3.11167e-58, 2.75826e-39, 0.143815, 58,
+ 67, 601, 7941, 11644, 13697,
+ 25680, 29882, 1.32165e+08, 1.62439e+08, 4.16837e+08,
+ 9.59097e+08, 1.32491e+09, 1.8728e+09, 1.0672e+17, 2.69606e+46,
+ 1.98285e+79, 1.0098e+82, 7.93064e+88, 3.67444e+121, 9.36506e+123,
+ 7.27954e+162, 3.05316e+168, 1.16171e+175, 1.64771e+189, 1.1622e+202,
+ 2.00748e+239, 2.51778e+244, 3.90282e+306, 1.79769e+308, V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+ -2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
+ -1318814539, -1289388009, -1287537572, -1279026536, -1241605942,
+ -1226046939, -941837148, -779818051, -413830641, -245798087,
+ -184657557, -127145950, -105483328, -32325, -26653,
+ -23858, -23834, -22363, -19858, -19044,
+ -18744, -15528, -5309, -3372, -2093,
+ -104, -98, -97, -93, -84,
+ -80, -78, -76, -72, -58,
+ -57, -56, -55, -45, -40,
+ -34, -32, -25, -24, -5,
+ -2, 0, 3, 10, 24,
+ 34, 42, 46, 47, 48,
+ 52, 56, 64, 65, 71,
+ 76, 79, 81, 82, 97,
+ 102, 103, 104, 106, 107,
+ 109, 116, 122, 3653, 4485,
+ 12405, 16504, 26262, 28704, 29755,
+ 30554, 16476817, 605431957, 832401070, 873617242,
+ 914205764, 1062628108, 1087581664, 1488498068, 1534668023,
+ 1661587028, 1696896187, 1866841746, 2032089723, 2147483647};
+
+
+static const uint32_t kUint32Values[] = {
+ 0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
+ 0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
+ 0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
+ 0x83, 0x88, 0x96, 0x9c, 0xa3, 0xfa,
+ 0x7a7, 0x165d, 0x234d, 0x3acb, 0x43a5, 0x4573,
+ 0x5b4f, 0x5f14, 0x6996, 0x6c6e, 0x7289, 0x7b9a,
+ 0x7bc9, 0x86bb, 0xa839, 0xaa41, 0xb03b, 0xc942,
+ 0xce68, 0xcf4c, 0xd3ad, 0xdea3, 0xe90c, 0xed86,
+ 0xfba5, 0x172dcc6, 0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
+ 0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
+ 0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
+ 0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
+ 0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
+ 0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
+ 0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
+ 0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
+ 0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
+
+
+MATCHER(IsNaN, std::string(negation ? "isn't" : "is") + " NaN") {
+ return std::isnan(arg);
+}
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+ const Operator* (SimplifiedOperatorBuilder::*constructor)();
+ const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+ return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+ {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
+ {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
+ {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
+ {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
+ "ChangeFloat64ToTagged"},
+ {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
+ {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
+ "ChangeTaggedToFloat64"},
+ {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
+ {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
+ {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
+
+} // namespace
+
+
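+// Applying any of these unary simplified operators to a plain Parameter gives
+// the reducer nothing to fold, so the parameterized test below expects the
+// node to be left unchanged.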
+typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
+ SimplifiedUnaryOperatorTest;
+
+
+TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
+ const UnaryOperator& unop = GetParam();
+ Reduction reduction = Reduce(
+ graph()->NewNode((simplified()->*unop.constructor)(), Parameter(0)));
+ EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
+ SimplifiedUnaryOperatorTest,
+ ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// BooleanNot
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithBooleanNot) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->BooleanNot(),
+ graph()->NewNode(simplified()->BooleanNot(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithFalseConstant) {
+ Reduction reduction0 =
+ Reduce(graph()->NewNode(simplified()->BooleanNot(), FalseConstant()));
+ ASSERT_TRUE(reduction0.Changed());
+ EXPECT_THAT(reduction0.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
+ Reduction reduction1 =
+ Reduce(graph()->NewNode(simplified()->BooleanNot(), TrueConstant()));
+ ASSERT_TRUE(reduction1.Changed());
+ EXPECT_THAT(reduction1.replacement(), IsFalseConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBitToBool
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeBitToBool(),
+ graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeBoolToBit(),
+ graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(n));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
+ TRACED_FOREACH(int32_t, n, kInt32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeInt32ToTagged(), Int32Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastI2D(n)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToFloat64
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToFloat64WithChangeInt32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeInt32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToFloat64WithChangeUint32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeUint32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(n));
+ }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ NumberConstant(-base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ NumberConstant(base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToInt32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToInt32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToInt32WithChangeInt32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+ }
+}
+
+
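+// DoubleToInt32 maps NaN to zero, so a NaN constant input folds to 0.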
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+ NumberConstant(-base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+ NumberConstant(base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToUint32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeUint32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
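+// Unsigned results are represented as Int32Constant nodes, hence the bit_cast
+// of the expected uint32 value to its int32 bit pattern below.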
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
+ }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+ NumberConstant(-base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+ NumberConstant(base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
+ TRACED_FOREACH(uint32_t, n, kUint32Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
+ Int32Constant(bit_cast<int32_t>(n))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastUI2D(n)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// LoadElement
+
+
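+// The bounds check can only be dropped when both key and length are constants
+// and the key lies in [0, length); negative or out-of-range keys must keep it.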
+TEST_F(SimplifiedOperatorReducerTest, LoadElementWithConstantKeyAndLength) {
+ ElementAccess const access = {kTypedArrayBoundsCheck, kUntaggedBase, 0,
+ Type::Any(), kMachAnyTagged};
+ ElementAccess access_nocheck = access;
+ access_nocheck.bounds_check = kNoBoundsCheck;
+ Node* const base = Parameter(0);
+ Node* const effect = graph()->start();
+ {
+ Node* const key = NumberConstant(-42.0);
+ Node* const length = NumberConstant(100.0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
+ base, key, length, effect));
+ ASSERT_FALSE(r.Changed());
+ }
+ {
+ Node* const key = NumberConstant(-0.0);
+ Node* const length = NumberConstant(1.0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
+ base, key, length, effect));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(access_nocheck, base, key, length, effect));
+ }
+ {
+ Node* const key = NumberConstant(0);
+ Node* const length = NumberConstant(1);
+ Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
+ base, key, length, effect));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(access_nocheck, base, key, length, effect));
+ }
+ {
+ Node* const key = NumberConstant(42.2);
+ Node* const length = NumberConstant(128);
+ Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
+ base, key, length, effect));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(access_nocheck, base, key, length, effect));
+ }
+ {
+ Node* const key = NumberConstant(39.2);
+ Node* const length = NumberConstant(32.0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
+ base, key, length, effect));
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// StoreElement
+
+
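+// Same constant key/length condition as for LoadElement above, applied to
+// stores.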
+TEST_F(SimplifiedOperatorReducerTest, StoreElementWithConstantKeyAndLength) {
+ ElementAccess const access = {kTypedArrayBoundsCheck, kUntaggedBase, 0,
+ Type::Any(), kMachAnyTagged};
+ ElementAccess access_nocheck = access;
+ access_nocheck.bounds_check = kNoBoundsCheck;
+ Node* const base = Parameter(0);
+ Node* const value = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ {
+ Node* const key = NumberConstant(-72.1);
+ Node* const length = NumberConstant(0.0);
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
+ length, value, effect, control));
+ ASSERT_FALSE(r.Changed());
+ }
+ {
+ Node* const key = NumberConstant(-0.0);
+ Node* const length = NumberConstant(999);
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
+ length, value, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStoreElement(access_nocheck, base, key, length, value, effect,
+ control));
+ }
+ {
+ Node* const key = NumberConstant(0);
+ Node* const length = NumberConstant(1);
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
+ length, value, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStoreElement(access_nocheck, base, key, length, value, effect,
+ control));
+ }
+ {
+ Node* const key = NumberConstant(42.2);
+ Node* const length = NumberConstant(128);
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
+ length, value, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStoreElement(access_nocheck, base, key, length, value, effect,
+ control));
+ }
+ {
+ Node* const key = NumberConstant(39.2);
+ Node* const length = NumberConstant(32.0);
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
+ length, value, effect, control));
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
new file mode 100644
index 0000000000..031a8974f8
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -0,0 +1,215 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/compiler/operator-properties-inl.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+ const Operator* (SimplifiedOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ Operator::Properties properties;
+ int value_input_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+ return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
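+// Each PURE(...) entry records the builder method, the expected opcode, any
+// extra properties beyond Operator::kPure, and the number of value inputs.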
+const PureOperator kPureOperators[] = {
+#define PURE(Name, properties, input_count) \
+ { \
+ &SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
+ Operator::kPure | properties, input_count \
+ }
+ PURE(BooleanNot, Operator::kNoProperties, 1),
+ PURE(NumberEqual, Operator::kCommutative, 2),
+ PURE(NumberLessThan, Operator::kNoProperties, 2),
+ PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
+ PURE(NumberAdd, Operator::kCommutative, 2),
+ PURE(NumberSubtract, Operator::kNoProperties, 2),
+ PURE(NumberMultiply, Operator::kCommutative, 2),
+ PURE(NumberDivide, Operator::kNoProperties, 2),
+ PURE(NumberModulus, Operator::kNoProperties, 2),
+ PURE(NumberToInt32, Operator::kNoProperties, 1),
+ PURE(NumberToUint32, Operator::kNoProperties, 1),
+ PURE(StringEqual, Operator::kCommutative, 2),
+ PURE(StringLessThan, Operator::kNoProperties, 2),
+ PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
+ PURE(StringAdd, Operator::kNoProperties, 2),
+ PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
+ PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
+ PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
+ PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
+ PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
+ PURE(ChangeBitToBool, Operator::kNoProperties, 1)
+#undef PURE
+};
+
+} // namespace
+
+
+class SimplifiedPureOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<PureOperator> {};
+
+
+TEST_P(SimplifiedPureOperatorTest, InstancesAreGloballyShared) {
+ const PureOperator& pop = GetParam();
+ SimplifiedOperatorBuilder simplified1(zone());
+ SimplifiedOperatorBuilder simplified2(zone());
+ EXPECT_EQ((simplified1.*pop.constructor)(), (simplified2.*pop.constructor)());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, NumberOfInputsAndOutputs) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (simplified.*pop.constructor)();
+
+ EXPECT_EQ(pop.value_input_count, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(0, op->ControlInputCount());
+ EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, OpcodeIsCorrect) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (simplified.*pop.constructor)();
+ EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, Properties) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (simplified.*pop.constructor)();
+ EXPECT_EQ(pop.properties, op->properties() & pop.properties);
+}
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
+ ::testing::ValuesIn(kPureOperators));
+
+
+// -----------------------------------------------------------------------------
+// Element access operators.
+
+namespace {
+
+const ElementAccess kElementAccesses[] = {
+ {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ kMachAnyTagged},
+ {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
+ Type::Any(), kMachInt8},
+ {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
+ Type::Any(), kMachInt16},
+ {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
+ Type::Any(), kMachInt32},
+ {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
+ Type::Any(), kMachUint8},
+ {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
+ Type::Any(), kMachUint16},
+ {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
+ Type::Any(), kMachUint32},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt8},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt16},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt32},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat32},
+ {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat64},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Signed32(), kMachInt8},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Unsigned32(), kMachUint8},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Signed32(), kMachInt16},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Unsigned32(), kMachUint16},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Signed32(), kMachInt32},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Unsigned32(), kMachUint32},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Number(), kRepFloat32},
+ {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
+ Type::Number(), kRepFloat64}};
+
+} // namespace
+
+
+class SimplifiedElementAccessOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<ElementAccess> {};
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const ElementAccess& access = GetParam();
+ const Operator* op = simplified.LoadElement(access);
+
+ EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
+ EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(access, ElementAccessOf(op));
+
+ EXPECT_EQ(3, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(0, op->ControlInputCount());
+ EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const ElementAccess& access = GetParam();
+ const Operator* op = simplified.StoreElement(access);
+
+ EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
+ EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(access, ElementAccessOf(op));
+
+ EXPECT_EQ(4, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
+ SimplifiedElementAccessOperatorTest,
+ ::testing::ValuesIn(kElementAccesses));
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
new file mode 100644
index 0000000000..b6be0bff17
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/graph.h"
+#include "src/compiler/value-numbering-reducer.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct TestOperator : public Operator {
+ TestOperator(Operator::Opcode opcode, Operator::Properties properties,
+ size_t value_in, size_t value_out)
+ : Operator(opcode, properties, "TestOp", value_in, 0, 0, value_out, 0,
+ 0) {}
+};
+
+
+static const TestOperator kOp0(0, Operator::kEliminatable, 0, 1);
+static const TestOperator kOp1(1, Operator::kEliminatable, 1, 1);
+
+
+class ValueNumberingReducerTest : public TestWithZone {
+ public:
+ ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
+
+ protected:
+ Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
+
+ Graph* graph() { return &graph_; }
+
+ private:
+ Graph graph_;
+ ValueNumberingReducer reducer_;
+};
+
+
+TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
+ Node* na = graph()->NewNode(&kOp0);
+ Node* nb = graph()->NewNode(&kOp0);
+ Node* n1 = graph()->NewNode(&kOp0, na);
+ Node* n2 = graph()->NewNode(&kOp0, nb);
+ EXPECT_FALSE(Reduce(n1).Changed());
+ EXPECT_FALSE(Reduce(n2).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, DeadNodesAreNeverReturned) {
+ Node* n0 = graph()->NewNode(&kOp0);
+ Node* n1 = graph()->NewNode(&kOp1, n0);
+ EXPECT_FALSE(Reduce(n1).Changed());
+ n1->Kill();
+ EXPECT_FALSE(Reduce(graph()->NewNode(&kOp1, n0)).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, OnlyEliminatableNodesAreReduced) {
+ TestOperator op(0, Operator::kNoProperties, 0, 1);
+ Node* n0 = graph()->NewNode(&op);
+ Node* n1 = graph()->NewNode(&op);
+ EXPECT_FALSE(Reduce(n0).Changed());
+ EXPECT_FALSE(Reduce(n1).Changed());
+}
+
+
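+// Value numbering should key on structural operator equality rather than
+// pointer identity: a second, distinct operator instance with the same opcode
+// and inputs must be unified with the first node.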
+TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
+ static const size_t kMaxInputCount = 16;
+ Node* inputs[kMaxInputCount];
+ for (size_t i = 0; i < arraysize(inputs); ++i) {
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(
+ std::numeric_limits<Operator::Opcode>::max() - i);
+ inputs[i] = graph()->NewNode(
+ new (zone()) TestOperator(opcode, Operator::kEliminatable, 0, 1));
+ }
+ TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+ const TestOperator op1(static_cast<Operator::Opcode>(input_count),
+ Operator::kEliminatable, input_count, 1);
+ Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+ Reduction r1 = Reduce(n1);
+ EXPECT_FALSE(r1.Changed());
+
+ const TestOperator op2(static_cast<Operator::Opcode>(input_count),
+ Operator::kEliminatable, input_count, 1);
+ Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
+ Reduction r2 = Reduce(n2);
+ EXPECT_TRUE(r2.Changed());
+ EXPECT_EQ(n1, r2.replacement());
+ }
+}
+
+
+TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
+ static const size_t kMaxInputCount = 16;
+ Node* inputs[kMaxInputCount];
+ for (size_t i = 0; i < arraysize(inputs); ++i) {
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(
+ std::numeric_limits<Operator::Opcode>::max() - i);
+ inputs[i] = graph()->NewNode(
+ new (zone()) TestOperator(opcode, Operator::kEliminatable, 0, 1));
+ }
+ TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+ const TestOperator op1(1, Operator::kEliminatable, input_count, 1);
+ Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+ Reduction r = Reduce(n);
+ EXPECT_FALSE(r.Changed());
+
+ r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(n, r.replacement());
+
+ r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(n, r.replacement());
+ }
+}
+
+
+TEST_F(ValueNumberingReducerTest, WontReplaceNodeWithItself) {
+ Node* n = graph()->NewNode(&kOp0);
+ EXPECT_FALSE(Reduce(n).Changed());
+ EXPECT_FALSE(Reduce(n).Changed());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
new file mode 100644
index 0000000000..48c074e046
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -0,0 +1,370 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
+ StreamBuilder m(this, kMachInt64, kMachInt32);
+ m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachUint32);
+ m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEUint32ToFloat64, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
+ StreamBuilder m(this, kMachUint64, kMachUint32);
+ m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+ return os << memacc.type;
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8, kX64Movsxbl, kX64Movb},
+ {kMachUint8, kX64Movzxbl, kX64Movb},
+ {kMachInt16, kX64Movsxwl, kX64Movw},
+ {kMachUint16, kX64Movzxwl, kX64Movw},
+ {kMachInt32, kX64Movl, kX64Movl},
+ {kMachUint32, kX64Movl, kX64Movl},
+ {kMachInt64, kX64Movq, kX64Movq},
+ {kMachUint64, kX64Movq, kX64Movq},
+ {kMachFloat32, kX64Movss, kX64Movss},
+ {kMachFloat64, kX64Movsd, kX64Movsd}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToUint64.
+
+
+namespace {
+
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
+
+
+struct BinaryOperation {
+ Constructor constructor;
+ const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const BinaryOperation& bop) {
+ return os << bop.constructor_name;
+}
+
+
+const BinaryOperation kWord32BinaryOperations[] = {
+ {&RawMachineAssembler::Word32And, "Word32And"},
+ {&RawMachineAssembler::Word32Or, "Word32Or"},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor"},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl"},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr"},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar"},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror"},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal"},
+ {&RawMachineAssembler::Int32Add, "Int32Add"},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub"},
+ {&RawMachineAssembler::Int32Mul, "Int32Mul"},
+ {&RawMachineAssembler::Int32MulHigh, "Int32MulHigh"},
+ {&RawMachineAssembler::Int32Div, "Int32Div"},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan"},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual"},
+ {&RawMachineAssembler::Int32Mod, "Int32Mod"},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div"},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan"},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual"},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod"}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<BinaryOperation>
+ InstructionSelectorChangeUint32ToUint64Test;
+
+
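+// On x64, 32-bit operations implicitly zero-extend their result into the full
+// 64-bit register, so the explicit ChangeUint32ToUint64 should be elided and
+// only the single 32-bit instruction remains.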
+TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
+ const BinaryOperation& bop = GetParam();
+ StreamBuilder m(this, kMachUint64, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kWord32BinaryOperations));
+
+
+// -----------------------------------------------------------------------------
+// TruncateInt64ToInt32.
+
+
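+// Truncating a 64-bit value that was shifted right by 32 needs only the low
+// half of the shift result, so a single 64-bit shift (reusing its first input
+// register for the output) suffices.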
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ Node* const p = m.Parameter(0);
+ Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+ m.Return(t);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+ EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ Node* const p = m.Parameter(0);
+ Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
+ m.Return(t);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+ EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Addition.
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithInt32AddWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const a0 = m.Int32Add(p0, p1);
+ m.Return(m.Int32Add(a0, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Multiplication.
+
+
+TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const m0 = m.Int32Mul(p0, p1);
+ m.Return(m.Int32Mul(m0, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[0]->OutputAt(0)));
+ EXPECT_EQ(kX64Imul32, s[1]->arch_opcode());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[1]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulHigh) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Int32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64ImulHigh32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_FALSE(s.IsUsedAtStart(s[0]->InputAt(1)));
+ ASSERT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32MulHigh) {
+ StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Uint32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64UmulHigh32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_FALSE(s.IsUsedAtStart(s[0]->InputAt(1)));
+ ASSERT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx));
+}
+
+
+// -----------------------------------------------------------------------------
+// Word64Shl.
+
+
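+// For shift amounts of 32 or more, the low 32 bits of the operand are shifted
+// out entirely, so the 32-to-64-bit extension cannot affect the result and is
+// folded into the 64-bit shift.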
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
+ TRACED_FORRANGE(int64_t, x, 32, 63) {
+ StreamBuilder m(this, kMachInt64, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
+ TRACED_FORRANGE(int64_t, x, 32, 63) {
+ StreamBuilder m(this, kMachInt64, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
new file mode 100644
index 0000000000..e23557adc4
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
@@ -0,0 +1,162 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/zone-pool.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ZonePoolTest : public TestWithIsolate {
+ public:
+ ZonePoolTest() : zone_pool_(isolate()) {}
+
+ protected:
+ ZonePool* zone_pool() { return &zone_pool_; }
+
+ void ExpectForPool(size_t current, size_t max, size_t total) {
+ ASSERT_EQ(current, zone_pool()->GetCurrentAllocatedBytes());
+ ASSERT_EQ(max, zone_pool()->GetMaxAllocatedBytes());
+ ASSERT_EQ(total, zone_pool()->GetTotalAllocatedBytes());
+ }
+
+ void Expect(ZonePool::StatsScope* stats, size_t current, size_t max,
+ size_t total) {
+ ASSERT_EQ(current, stats->GetCurrentAllocatedBytes());
+ ASSERT_EQ(max, stats->GetMaxAllocatedBytes());
+ ASSERT_EQ(total, stats->GetTotalAllocatedBytes());
+ }
+
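+  // Allocates a small random number of bytes and returns the zone's actual
+  // growth in allocation_size(), which may exceed the request if the zone
+  // rounds allocations up.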
+ size_t Allocate(Zone* zone) {
+ size_t bytes = rng.NextInt(25) + 7;
+ int size_before = zone->allocation_size();
+ zone->New(static_cast<int>(bytes));
+ return static_cast<size_t>(zone->allocation_size() - size_before);
+ }
+
+ private:
+ ZonePool zone_pool_;
+ base::RandomNumberGenerator rng;
+};
+
+
+TEST_F(ZonePoolTest, Empty) {
+ ExpectForPool(0, 0, 0);
+ {
+ ZonePool::StatsScope stats(zone_pool());
+ Expect(&stats, 0, 0, 0);
+ }
+ ExpectForPool(0, 0, 0);
+ {
+ ZonePool::Scope scope(zone_pool());
+ scope.zone();
+ }
+ ExpectForPool(0, 0, 0);
+}
+
+
+TEST_F(ZonePoolTest, MultipleZonesWithDeletion) {
+ static const size_t kArraySize = 10;
+
+ ZonePool::Scope* scopes[kArraySize];
+
+ // Initialize.
+ size_t before_stats = 0;
+ for (size_t i = 0; i < kArraySize; ++i) {
+ scopes[i] = new ZonePool::Scope(zone_pool());
+ before_stats += Allocate(scopes[i]->zone()); // Add some stuff.
+ }
+
+ ExpectForPool(before_stats, before_stats, before_stats);
+
+ ZonePool::StatsScope stats(zone_pool());
+
+ size_t before_deletion = 0;
+ for (size_t i = 0; i < kArraySize; ++i) {
+ before_deletion += Allocate(scopes[i]->zone()); // Add some stuff.
+ }
+
+ Expect(&stats, before_deletion, before_deletion, before_deletion);
+ ExpectForPool(before_stats + before_deletion, before_stats + before_deletion,
+ before_stats + before_deletion);
+
+ // Delete the scopes and create new ones.
+ for (size_t i = 0; i < kArraySize; ++i) {
+ delete scopes[i];
+ scopes[i] = new ZonePool::Scope(zone_pool());
+ }
+
+ Expect(&stats, 0, before_deletion, before_deletion);
+ ExpectForPool(0, before_stats + before_deletion,
+ before_stats + before_deletion);
+
+ size_t after_deletion = 0;
+ for (size_t i = 0; i < kArraySize; ++i) {
+ after_deletion += Allocate(scopes[i]->zone()); // Add some stuff.
+ }
+
+ Expect(&stats, after_deletion, std::max(after_deletion, before_deletion),
+ before_deletion + after_deletion);
+ ExpectForPool(after_deletion,
+ std::max(after_deletion, before_stats + before_deletion),
+ before_stats + before_deletion + after_deletion);
+
+ // Cleanup.
+ for (size_t i = 0; i < kArraySize; ++i) {
+ delete scopes[i];
+ }
+
+ Expect(&stats, 0, std::max(after_deletion, before_deletion),
+ before_deletion + after_deletion);
+ ExpectForPool(0, std::max(after_deletion, before_stats + before_deletion),
+ before_stats + before_deletion + after_deletion);
+}
+
+
+TEST_F(ZonePoolTest, SimpleAllocationLoop) {
+ int runs = 20;
+ size_t total_allocated = 0;
+ size_t max_loop_allocation = 0;
+ ZonePool::StatsScope outer_stats(zone_pool());
+ {
+ ZonePool::Scope outer_scope(zone_pool());
+ size_t outer_allocated = 0;
+ for (int i = 0; i < runs; ++i) {
+ {
+ size_t bytes = Allocate(outer_scope.zone());
+ outer_allocated += bytes;
+ total_allocated += bytes;
+ }
+ ZonePool::StatsScope inner_stats(zone_pool());
+ size_t allocated = 0;
+ {
+ ZonePool::Scope inner_scope(zone_pool());
+ for (int j = 0; j < 20; ++j) {
+ size_t bytes = Allocate(inner_scope.zone());
+ allocated += bytes;
+ total_allocated += bytes;
+ max_loop_allocation =
+ std::max(max_loop_allocation, outer_allocated + allocated);
+ Expect(&inner_stats, allocated, allocated, allocated);
+ Expect(&outer_stats, outer_allocated + allocated, max_loop_allocation,
+ total_allocated);
+ ExpectForPool(outer_allocated + allocated, max_loop_allocation,
+ total_allocated);
+ }
+ }
+ Expect(&inner_stats, 0, allocated, allocated);
+ Expect(&outer_stats, outer_allocated, max_loop_allocation,
+ total_allocated);
+ ExpectForPool(outer_allocated, max_loop_allocation, total_allocated);
+ }
+ }
+ Expect(&outer_stats, 0, max_loop_allocation, total_allocated);
+ ExpectForPool(0, max_loop_allocation, total_allocated);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
new file mode 100644
index 0000000000..55dd6c6f58
--- /dev/null
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -0,0 +1,396 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class GCIdleTimeHandlerTest : public ::testing::Test {
+ public:
+ GCIdleTimeHandlerTest() {}
+ virtual ~GCIdleTimeHandlerTest() {}
+
+ GCIdleTimeHandler* handler() { return &handler_; }
+
+ GCIdleTimeHandler::HeapState DefaultHeapState() {
+ GCIdleTimeHandler::HeapState result;
+ result.contexts_disposed = 0;
+ result.size_of_objects = kSizeOfObjects;
+ result.incremental_marking_stopped = false;
+ result.can_start_incremental_marking = true;
+ result.sweeping_in_progress = false;
+ result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
+ result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
+ result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
+ result.used_new_space_size = 0;
+ result.new_space_capacity = kNewSpaceCapacity;
+ result.new_space_allocation_throughput_in_bytes_per_ms =
+ kNewSpaceAllocationThroughput;
+ return result;
+ }
+
+ static const size_t kSizeOfObjects = 100 * MB;
+ static const size_t kMarkCompactSpeed = 200 * KB;
+ static const size_t kMarkingSpeed = 200 * KB;
+ static const size_t kScavengeSpeed = 100 * KB;
+ static const size_t kNewSpaceCapacity = 1 * MB;
+ static const size_t kNewSpaceAllocationThroughput = 10 * KB;
+
+ private:
+ GCIdleTimeHandler handler_;
+};
+
+} // namespace
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeInitial) {
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(1, 0);
+ EXPECT_EQ(
+ static_cast<size_t>(GCIdleTimeHandler::kInitialConservativeMarkingSpeed *
+ GCIdleTimeHandler::kConservativeTimeRatio),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeNonZero) {
+ size_t marking_speed_in_bytes_per_millisecond = 100;
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ 1, marking_speed_in_bytes_per_millisecond);
+ EXPECT_EQ(static_cast<size_t>(marking_speed_in_bytes_per_millisecond *
+ GCIdleTimeHandler::kConservativeTimeRatio),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow1) {
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ 10, std::numeric_limits<size_t>::max());
+ EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ std::numeric_limits<size_t>::max(), 10);
+ EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeInitial) {
+ size_t size = 100 * MB;
+ size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, 0);
+ EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeMarkCompactSpeed,
+ time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
+ size_t size = 100 * MB;
+ size_t speed = 1 * MB;
+ size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+ EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
+ size_t size = std::numeric_limits<size_t>::max();
+ size_t speed = 1;
+ size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+ EXPECT_EQ(GCIdleTimeHandler::kMaxMarkCompactTimeInMs, time);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoScavengeEmptyNewSpace) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_in_ms = 16;
+ EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
+ idle_time_in_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoScavengeFullNewSpace) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.used_new_space_size = kNewSpaceCapacity;
+ int idle_time_in_ms = 16;
+ EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
+ idle_time_in_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoScavengeUnknownScavengeSpeed) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.used_new_space_size = kNewSpaceCapacity;
+ heap_state.scavenge_speed_in_bytes_per_ms = 0;
+ int idle_time_in_ms = 16;
+ EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
+ idle_time_in_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoScavengeLowScavengeSpeed) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.used_new_space_size = kNewSpaceCapacity;
+ heap_state.scavenge_speed_in_bytes_per_ms = 1 * KB;
+ int idle_time_in_ms = 16;
+ EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
+ idle_time_in_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.used_new_space_size = kNewSpaceCapacity;
+ heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
+ int idle_time_in_ms = 16;
+ EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
+ idle_time_in_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
+ size_t idle_time_in_ms = 16;
+ EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_in_ms, 0, 0));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DontDoMarkCompact) {
+ size_t idle_time_in_ms = 1;
+ EXPECT_FALSE(GCIdleTimeHandler::ShouldDoMarkCompact(
+ idle_time_in_ms, kSizeOfObjects, kMarkingSpeed));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ heap_state.incremental_marking_stopped = true;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms =
+ static_cast<int>((heap_state.size_of_objects + speed - 1) / speed);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ heap_state.incremental_marking_stopped = true;
+ heap_state.size_of_objects = GCIdleTimeHandler::kSmallHeapSize / 2;
+ int idle_time_ms = 0;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ heap_state.incremental_marking_stopped = true;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+ int idle_time_ms = 10;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+ static_cast<size_t>(action.parameter));
+ EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+ int idle_time_ms = 10;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+ static_cast<size_t>(action.parameter));
+ EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
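+// After kMaxMarkCompactsInIdleRound full GCs within one idle round the handler
+// is expected to report DONE until enough scavenges indicate renewed mutator
+// activity (see the ContinueAfterStop tests below).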
+TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+ handler()->NotifyIdleMarkCompact();
+ }
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ // In this case we emulate incremental marking steps that finish with a
+ // full gc.
+ handler()->NotifyIdleMarkCompact();
+ }
+ heap_state.can_start_incremental_marking = false;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+ handler()->NotifyIdleMarkCompact();
+ }
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+ // Emulate mutator work.
+ for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+ handler()->NotifyScavenge();
+ }
+ action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ if (action.type == DONE) break;
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish
+    // with a full gc.
+ handler()->NotifyIdleMarkCompact();
+ }
+ heap_state.can_start_incremental_marking = false;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+ // Emulate mutator work.
+ for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+ handler()->NotifyScavenge();
+ }
+ heap_state.can_start_incremental_marking = true;
+ action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, Scavenge) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ heap_state.used_new_space_size =
+ heap_state.new_space_capacity -
+ (kNewSpaceAllocationThroughput * idle_time_ms);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_SCAVENGE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ heap_state.can_start_incremental_marking = false;
+ heap_state.incremental_marking_stopped = true;
+ heap_state.used_new_space_size =
+ heap_state.new_space_capacity -
+ (kNewSpaceAllocationThroughput * idle_time_ms);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_SCAVENGE, action.type);
+ heap_state.used_new_space_size = 0;
+ action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 0;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ if (action.type == DONE) break;
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish
+    // with a full gc.
+ handler()->NotifyIdleMarkCompact();
+ }
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ // Emulate mutator work.
+ for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+ handler()->NotifyScavenge();
+ }
+ action = handler()->Compute(0, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+} // namespace internal
+} // namespace v8
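
The handler exercised above behaves as a small state machine: full-GC or incremental-marking actions are handed out until kMaxMarkCompactsInIdleRound idle mark-compacts have been reported, the round then ends with DONE, and kIdleScavengeThreshold scavenge notifications start a new round. The stand-alone toy below mirrors only that round-keeping contract the tests check; it is an illustration, not the V8 GCIdleTimeHandler, and the constant values are assumptions.

#include <cassert>

namespace toy {

enum ActionType { DO_NOTHING, DO_FULL_GC, DONE };

class IdleRoundTracker {
 public:
  // Assumed values; the real constants live in GCIdleTimeHandler.
  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  IdleRoundTracker() : mark_compacts_(0), scavenges_(0) {}

  ActionType Compute(int idle_time_ms) {
    if (idle_time_ms <= 0) return DO_NOTHING;
    if (mark_compacts_ >= kMaxMarkCompactsInIdleRound) return DONE;
    return DO_FULL_GC;
  }

  void NotifyIdleMarkCompact() { ++mark_compacts_; }

  void NotifyScavenge() {
    if (++scavenges_ >= kIdleScavengeThreshold) {
      // Enough mutator activity has happened: begin a fresh idle round.
      mark_compacts_ = 0;
      scavenges_ = 0;
    }
  }

 private:
  int mark_compacts_;
  int scavenges_;
};

}  // namespace toy

int main() {
  toy::IdleRoundTracker tracker;
  for (int i = 0; i < toy::IdleRoundTracker::kMaxMarkCompactsInIdleRound; i++) {
    assert(tracker.Compute(10) == toy::DO_FULL_GC);
    tracker.NotifyIdleMarkCompact();
  }
  assert(tracker.Compute(10) == toy::DONE);  // the idle round is over
  for (int i = 0; i < toy::IdleRoundTracker::kIdleScavengeThreshold; i++) {
    tracker.NotifyScavenge();                // emulate mutator work
  }
  assert(tracker.Compute(10) == toy::DO_FULL_GC);  // a new round has begun
  return 0;
}
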
diff --git a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
new file mode 100644
index 0000000000..d2c160e558
--- /dev/null
+++ b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-platform.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+ virtual ~MockTask() { Die(); }
+ MOCK_METHOD0(Run, void());
+ MOCK_METHOD0(Die, void());
+};
+
+} // namespace
+
+
+TEST(DefaultPlatformTest, PumpMessageLoop) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatform platform;
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ platform.CallOnForegroundThread(isolate, task);
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+}
+
+} // namespace platform
+} // namespace v8
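
As the test shows, PumpMessageLoop() returns true when it executed a pending foreground task for the given isolate and false when there was nothing to do. The sketch below replays that pattern outside the test harness; it reuses only the calls the test itself exercises (DefaultPlatform, CallOnForegroundThread, PumpMessageLoop), the internal header include mirrors the test, and LogTask/DrainForegroundTasks are made-up names for illustration.

#include "src/libplatform/default-platform.h"

namespace {

// Made-up task type used only for this sketch; the platform takes ownership
// and destroys the task after running it, as the mock's Die() expectation
// in the test above demonstrates.
class LogTask : public v8::Task {
 public:
  virtual ~LogTask() {}
  virtual void Run() { /* main-thread work would go here */ }
};

}  // namespace

// Hypothetical helper: post one task and drain the foreground queue.
void DrainForegroundTasks(v8::platform::DefaultPlatform* platform,
                          v8::Isolate* isolate) {
  platform->CallOnForegroundThread(isolate, new LogTask);
  while (platform->PumpMessageLoop(isolate)) {
    // Keep pumping until no foreground task is left for |isolate|.
  }
}
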
diff --git a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
new file mode 100644
index 0000000000..9a186589f7
--- /dev/null
+++ b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "src/base/platform/platform.h"
+#include "src/libplatform/task-queue.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::IsNull;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+ MOCK_METHOD0(Run, void());
+};
+
+
+class TaskQueueThread FINAL : public base::Thread {
+ public:
+ explicit TaskQueueThread(TaskQueue* queue)
+ : Thread(Options("libplatform TaskQueueThread")), queue_(queue) {}
+
+ virtual void Run() OVERRIDE { EXPECT_THAT(queue_->GetNext(), IsNull()); }
+
+ private:
+ TaskQueue* queue_;
+};
+
+} // namespace
+
+
+TEST(TaskQueueTest, Basic) {
+ TaskQueue queue;
+ MockTask task;
+ queue.Append(&task);
+ EXPECT_EQ(&task, queue.GetNext());
+ queue.Terminate();
+ EXPECT_THAT(queue.GetNext(), IsNull());
+}
+
+
+TEST(TaskQueueTest, TerminateMultipleReaders) {
+ TaskQueue queue;
+ TaskQueueThread thread1(&queue);
+ TaskQueueThread thread2(&queue);
+ thread1.Start();
+ thread2.Start();
+ queue.Terminate();
+ thread1.Join();
+ thread2.Join();
+}
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
new file mode 100644
index 0000000000..175b311666
--- /dev/null
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "src/libplatform/task-queue.h"
+#include "src/libplatform/worker-thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::IsNull;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+ virtual ~MockTask() { Die(); }
+ MOCK_METHOD0(Run, void());
+ MOCK_METHOD0(Die, void());
+};
+
+} // namespace
+
+
+TEST(WorkerThreadTest, Basic) {
+ static const size_t kNumTasks = 10;
+
+ TaskQueue queue;
+ for (size_t i = 0; i < kNumTasks; ++i) {
+ InSequence s;
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ queue.Append(task);
+ }
+
+ WorkerThread thread1(&queue);
+ WorkerThread thread2(&queue);
+
+ // TaskQueue DCHECKS that it's empty in its destructor.
+ queue.Terminate();
+}
+
+} // namespace platform
+} // namespace v8
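
The declaration order in the test above is load-bearing: the queue outlives the worker threads, each task is run and then deleted by whichever worker dequeues it (hence the Die() expectations), and Terminate() is what lets GetNext() return NULL so the workers can exit before the queue's destructor checks that it is empty. A minimal sketch of that lifetime pattern with a single worker, under the assumption that WorkerThread joins its thread in its destructor (the test relies on the same ordering), with RunOnWorker being a hypothetical helper:

#include "include/v8-platform.h"
#include "src/libplatform/task-queue.h"
#include "src/libplatform/worker-thread.h"

// |task| is heap-allocated and will be deleted by the worker after it has
// run, mirroring the Run()/Die() sequence expected in the test above.
void RunOnWorker(v8::Task* task) {
  v8::platform::TaskQueue queue;              // destroyed last, must be empty
  v8::platform::WorkerThread worker(&queue);  // starts draining |queue|
  queue.Append(task);
  queue.Terminate();  // once the pending task is gone, GetNext() returns NULL
}                     // |worker| is destroyed (and joined) before |queue|
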
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
new file mode 100644
index 0000000000..8c361ddc36
--- /dev/null
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+class DefaultPlatformEnvironment FINAL : public ::testing::Environment {
+ public:
+ DefaultPlatformEnvironment() : platform_(NULL) {}
+ ~DefaultPlatformEnvironment() {}
+
+ virtual void SetUp() OVERRIDE {
+ EXPECT_EQ(NULL, platform_);
+ platform_ = v8::platform::CreateDefaultPlatform();
+ ASSERT_TRUE(platform_ != NULL);
+ v8::V8::InitializePlatform(platform_);
+ ASSERT_TRUE(v8::V8::Initialize());
+ }
+
+ virtual void TearDown() OVERRIDE {
+ ASSERT_TRUE(platform_ != NULL);
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
+ delete platform_;
+ platform_ = NULL;
+ }
+
+ private:
+ v8::Platform* platform_;
+};
+
+} // namespace
+
+
+int main(int argc, char** argv) {
+ testing::InitGoogleMock(&argc, argv);
+ testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ return RUN_ALL_TESTS();
+}
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
new file mode 100644
index 0000000000..b2284990c4
--- /dev/null
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/base/platform/time.h"
+#include "src/flags.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+
+std::ostream& operator<<(std::ostream& os, ExternalArrayType type) {
+ switch (type) {
+ case kExternalInt8Array:
+ return os << "ExternalInt8Array";
+ case kExternalUint8Array:
+ return os << "ExternalUint8Array";
+ case kExternalInt16Array:
+ return os << "ExternalInt16Array";
+ case kExternalUint16Array:
+ return os << "ExternalUint16Array";
+ case kExternalInt32Array:
+ return os << "ExternalInt32Array";
+ case kExternalUint32Array:
+ return os << "ExternalUint32Array";
+ case kExternalFloat32Array:
+ return os << "ExternalFloat32Array";
+ case kExternalFloat64Array:
+ return os << "ExternalFloat64Array";
+ case kExternalUint8ClampedArray:
+ return os << "ExternalUint8ClampedArray";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+// static
+Isolate* TestWithIsolate::isolate_ = NULL;
+
+
+TestWithIsolate::TestWithIsolate()
+ : isolate_scope_(isolate()), handle_scope_(isolate()) {}
+
+
+TestWithIsolate::~TestWithIsolate() {}
+
+
+// static
+void TestWithIsolate::SetUpTestCase() {
+ Test::SetUpTestCase();
+ EXPECT_EQ(NULL, isolate_);
+ isolate_ = v8::Isolate::New();
+ EXPECT_TRUE(isolate_ != NULL);
+}
+
+
+// static
+void TestWithIsolate::TearDownTestCase() {
+ ASSERT_TRUE(isolate_ != NULL);
+ isolate_->Dispose();
+ isolate_ = NULL;
+ Test::TearDownTestCase();
+}
+
+
+TestWithContext::TestWithContext()
+ : context_(Context::New(isolate())), context_scope_(context_) {}
+
+
+TestWithContext::~TestWithContext() {}
+
+
+namespace base {
+namespace {
+
+inline int64_t GetRandomSeedFromFlag(int random_seed) {
+ return random_seed ? random_seed : TimeTicks::Now().ToInternalValue();
+}
+
+} // namespace
+
+TestWithRandomNumberGenerator::TestWithRandomNumberGenerator()
+ : rng_(GetRandomSeedFromFlag(internal::FLAG_random_seed)) {}
+
+
+TestWithRandomNumberGenerator::~TestWithRandomNumberGenerator() {}
+
+} // namespace base
+
+
+namespace internal {
+
+TestWithIsolate::~TestWithIsolate() {}
+
+
+Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
+
+
+TestWithZone::~TestWithZone() {}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
new file mode 100644
index 0000000000..2025b8a8d8
--- /dev/null
+++ b/deps/v8/test/unittests/test-utils.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_TEST_UTILS_H_
+#define V8_UNITTESTS_TEST_UTILS_H_
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/zone.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+
+std::ostream& operator<<(std::ostream&, ExternalArrayType);
+
+
+class TestWithIsolate : public ::testing::Test {
+ public:
+ TestWithIsolate();
+ virtual ~TestWithIsolate();
+
+ Isolate* isolate() const { return isolate_; }
+
+ static void SetUpTestCase();
+ static void TearDownTestCase();
+
+ private:
+ static Isolate* isolate_;
+ Isolate::Scope isolate_scope_;
+ HandleScope handle_scope_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+};
+
+
+class TestWithContext : public virtual TestWithIsolate {
+ public:
+ TestWithContext();
+ virtual ~TestWithContext();
+
+ const Local<Context>& context() const { return context_; }
+
+ private:
+ Local<Context> context_;
+ Context::Scope context_scope_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithContext);
+};
+
+
+namespace base {
+
+class TestWithRandomNumberGenerator : public ::testing::Test {
+ public:
+ TestWithRandomNumberGenerator();
+ virtual ~TestWithRandomNumberGenerator();
+
+ RandomNumberGenerator* rng() { return &rng_; }
+
+ private:
+ RandomNumberGenerator rng_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithRandomNumberGenerator);
+};
+
+} // namespace base
+
+
+namespace internal {
+
+// Forward declarations.
+class Factory;
+
+
+class TestWithIsolate : public virtual ::v8::TestWithIsolate {
+ public:
+ TestWithIsolate() {}
+ virtual ~TestWithIsolate();
+
+ Factory* factory() const;
+ Isolate* isolate() const {
+ return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+};
+
+
+class TestWithZone : public TestWithIsolate {
+ public:
+ TestWithZone() : zone_(isolate()) {}
+ virtual ~TestWithZone();
+
+ Zone* zone() { return &zone_; }
+
+ private:
+ Zone zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithZone);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_TEST_UTILS_H_
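
For orientation, a test in this directory would typically derive from one of these fixtures and let it manage the isolate, handle scope and context; the sketch below is illustrative only (ExampleWithContextTest and its body are made up, not part of the commit).

#include "test/unittests/test-utils.h"

namespace v8 {

// Hypothetical fixture; TestWithContext already provides isolate(),
// a HandleScope and an entered context via context().
class ExampleWithContextTest : public TestWithContext {};

TEST_F(ExampleWithContextTest, ContextIsLive) {
  EXPECT_FALSE(context().IsEmpty());
  EXPECT_EQ(isolate(), context()->GetIsolate());
}

}  // namespace v8
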
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
new file mode 100644
index 0000000000..a881e46e0a
--- /dev/null
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -0,0 +1,126 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'unittests',
+ 'type': 'executable',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ '../../testing/gmock.gyp:gmock',
+ '../../testing/gtest.gyp:gtest',
+ '../../tools/gyp/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'base/bits-unittest.cc',
+ 'base/cpu-unittest.cc',
+ 'base/division-by-constant-unittest.cc',
+ 'base/flags-unittest.cc',
+ 'base/functional-unittest.cc',
+ 'base/platform/condition-variable-unittest.cc',
+ 'base/platform/mutex-unittest.cc',
+ 'base/platform/platform-unittest.cc',
+ 'base/platform/semaphore-unittest.cc',
+ 'base/platform/time-unittest.cc',
+ 'base/sys-info-unittest.cc',
+ 'base/utils/random-number-generator-unittest.cc',
+ 'char-predicates-unittest.cc',
+ 'compiler/change-lowering-unittest.cc',
+ 'compiler/common-operator-unittest.cc',
+ 'compiler/compiler-test-utils.h',
+ 'compiler/diamond-unittest.cc',
+ 'compiler/graph-reducer-unittest.cc',
+ 'compiler/graph-unittest.cc',
+ 'compiler/graph-unittest.h',
+ 'compiler/instruction-selector-unittest.cc',
+ 'compiler/instruction-selector-unittest.h',
+ 'compiler/js-builtin-reducer-unittest.cc',
+ 'compiler/js-operator-unittest.cc',
+ 'compiler/js-typed-lowering-unittest.cc',
+ 'compiler/machine-operator-reducer-unittest.cc',
+ 'compiler/machine-operator-unittest.cc',
+ 'compiler/node-test-utils.cc',
+ 'compiler/node-test-utils.h',
+ 'compiler/register-allocator-unittest.cc',
+ 'compiler/select-lowering-unittest.cc',
+ 'compiler/simplified-operator-reducer-unittest.cc',
+ 'compiler/simplified-operator-unittest.cc',
+ 'compiler/value-numbering-reducer-unittest.cc',
+ 'compiler/zone-pool-unittest.cc',
+ 'libplatform/default-platform-unittest.cc',
+ 'libplatform/task-queue-unittest.cc',
+ 'libplatform/worker-thread-unittest.cc',
+ 'heap/gc-idle-time-handler-unittest.cc',
+ 'run-all-unittests.cc',
+ 'test-utils.h',
+ 'test-utils.cc',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="arm"', {
+ 'sources': [ ### gcmole(arch:arm) ###
+ 'compiler/arm/instruction-selector-arm-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [ ### gcmole(arch:arm64) ###
+ 'compiler/arm64/instruction-selector-arm64-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [ ### gcmole(arch:ia32) ###
+ 'compiler/ia32/instruction-selector-ia32-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="mipsel"', {
+ 'sources': [ ### gcmole(arch:mipsel) ###
+ 'compiler/mips/instruction-selector-mips-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [ ### gcmole(arch:x64) ###
+ 'compiler/x64/instruction-selector-x64-unittest.cc',
+ ],
+ }],
+ ['component=="shared_library"', {
+        # unittests can't be built against a shared library, so we
+        # need to depend on the underlying static target in that case.
+ 'conditions': [
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
+ },
+ {
+ 'dependencies': [
+ '../../tools/gyp/v8.gyp:v8_nosnapshot',
+ ],
+ }],
+ ],
+ }, {
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ }],
+ ['os_posix == 1', {
+ # TODO(svenpanne): This is a temporary work-around to fix the warnings
+ # that show up because we use -std=gnu++0x instead of -std=c++11.
+ 'cflags!': [
+ '-pedantic',
+ ],
+ 'direct_dependent_settings': {
+ 'cflags!': [
+ '-pedantic',
+ ],
+ },
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
new file mode 100644
index 0000000000..d439913ccf
--- /dev/null
+++ b/deps/v8/test/unittests/unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]