Diffstat (limited to 'deps/v8/test/cctest/test-assembler-arm64.cc')
 -rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc | 1058
 1 file changed, 568 insertions(+), 490 deletions(-)
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index d49f8c8974..1f6b732808 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -42,6 +42,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/diagnostics/arm64/disasm-arm64.h"
#include "src/execution/arm64/simulator-arm64.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
@@ -117,22 +118,23 @@ static void InitializeVM() {
#ifdef USE_SIMULATOR
// Run tests with the simulator.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- CHECK_NOT_NULL(isolate); \
- std::unique_ptr<byte[]> owned_buf{new byte[buf_size]}; \
- byte* buf = owned_buf.get(); \
- MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
- ExternalAssemblerBuffer(buf, buf_size)); \
- Decoder<DispatchingDecoderVisitor>* decoder = \
- new Decoder<DispatchingDecoderVisitor>(); \
- Simulator simulator(decoder); \
- std::unique_ptr<PrintDisassembler> pdis; \
- RegisterDump core; \
- if (i::FLAG_trace_sim) { \
- pdis.reset(new PrintDisassembler(stdout)); \
- decoder->PrependVisitor(pdis.get()); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK_NOT_NULL(isolate); \
+ std::unique_ptr<byte[]> owned_buf{new byte[buf_size]}; \
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
+ ExternalAssemblerBuffer(owned_buf.get(), buf_size)); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Simulator simulator(decoder); \
+ std::unique_ptr<PrintDisassembler> pdis; \
+ RegisterDump core; \
+ HandleScope handle_scope(isolate); \
+ Handle<Code> code; \
+ if (i::FLAG_trace_sim) { \
+ pdis.reset(new PrintDisassembler(stdout)); \
+ decoder->PrependVisitor(pdis.get()); \
}
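
Under the simulator, SETUP_SIZE wires a Decoder<DispatchingDecoderVisitor> into the Simulator and, when --trace_sim is set, prepends a PrintDisassembler visitor so each decoded instruction is printed before it is simulated. Below is a minimal, standalone sketch of that prepend-a-visitor dispatch pattern; the class names (DispatchingDecoder, PrintingVisitor, ExecutingVisitor) are illustrative stand-ins, not V8's actual types.

// Standalone illustration: a dispatching decoder forwards each decoded item
// to every registered visitor; a tracing visitor can be prepended so it sees
// the instruction before the executing visitor does.
#include <cstdint>
#include <cstdio>
#include <vector>

struct InstructionVisitor {
  virtual ~InstructionVisitor() = default;
  virtual void Visit(uint32_t instruction) = 0;
};

class DispatchingDecoder {
 public:
  void AppendVisitor(InstructionVisitor* v) { visitors_.push_back(v); }
  void PrependVisitor(InstructionVisitor* v) {
    visitors_.insert(visitors_.begin(), v);
  }
  void Decode(uint32_t instruction) {
    for (InstructionVisitor* v : visitors_) v->Visit(instruction);
  }

 private:
  std::vector<InstructionVisitor*> visitors_;
};

struct PrintingVisitor : InstructionVisitor {
  void Visit(uint32_t instruction) override {
    std::printf("trace: 0x%08x\n", static_cast<unsigned>(instruction));
  }
};

struct ExecutingVisitor : InstructionVisitor {
  void Visit(uint32_t instruction) override {
    // A real simulator would interpret the instruction here.
    (void)instruction;
  }
};

int main() {
  DispatchingDecoder decoder;
  ExecutingVisitor executor;
  decoder.AppendVisitor(&executor);

  PrintingVisitor tracer;           // Analogous to PrintDisassembler.
  decoder.PrependVisitor(&tracer);  // Trace first, then execute.

  decoder.Decode(0xD503201F);  // AArch64 NOP encoding, used as sample data.
  return 0;
}

Prepending rather than appending is what guarantees the trace output appears before the instruction's side effects.
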
// Reset the assembler and simulator, so that instructions can be generated,
@@ -154,17 +156,18 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
+#define RUN() simulator.RunFrom(reinterpret_cast<Instruction*>(code->entry()))
-#define END() \
- __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- { \
- CodeDesc desc; \
- __ GetCode(masm.isolate(), &desc); \
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); \
+ if (FLAG_print_code) code->Print(); \
}
#else // ifdef USE_SIMULATOR.
@@ -176,8 +179,8 @@ static void InitializeVM() {
auto owned_buf = AllocateAssemblerBuffer(buf_size); \
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
owned_buf->CreateView()); \
- uint8_t* buf = owned_buf->start(); \
- USE(buf); \
+ HandleScope handle_scope(isolate); \
+ Handle<Code> code; \
RegisterDump core;
#define RESET() \
@@ -194,20 +197,21 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- owned_buf->MakeExecutable(); \
- { \
- auto* test_function = bit_cast<void (*)()>(buf); \
- test_function(); \
+#define RUN() \
+ { \
+ auto f = GeneratedCode<void>::FromCode(*code); \
+ f.Call(); \
}
-#define END() \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- { \
- CodeDesc desc; \
- __ GetCode(masm.isolate(), &desc); \
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); \
+ if (FLAG_print_code) code->Print(); \
}
#endif // ifdef USE_SIMULATOR.
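
Both RUN() variants now execute the assembled snippet through a typed entry point rather than a raw buffer cast: the simulator path runs from code->entry(), and the native path wraps the built Code object in GeneratedCode<void> and calls it. Below is a minimal, standalone sketch of that typed-wrapper pattern, assuming an entry address is already available; GeneratedCodeSketch and FromEntry are illustrative names, not V8's actual GeneratedCode API.

// Standalone illustration (not V8 internals): a typed wrapper around a code
// entry address, so the call site states the signature once and cannot call
// the generated code with the wrong arguments.
#include <cstdint>
#include <cstdio>

template <typename Signature>
class GeneratedCodeSketch;

template <typename Return, typename... Args>
class GeneratedCodeSketch<Return(Args...)> {
 public:
  // In V8, FromCode() pulls the entry address out of a Code object; here any
  // address of executable code with this signature will do.
  static GeneratedCodeSketch FromEntry(uintptr_t entry) {
    return GeneratedCodeSketch(reinterpret_cast<Return (*)(Args...)>(entry));
  }
  Return Call(Args... args) { return fn_(args...); }

 private:
  explicit GeneratedCodeSketch(Return (*fn)(Args...)) : fn_(fn) {}
  Return (*fn_)(Args...);
};

// Stand-in for an assembled test body.
void GeneratedBody() { std::puts("running generated code"); }

int main() {
  auto f = GeneratedCodeSketch<void()>::FromEntry(
      reinterpret_cast<uintptr_t>(&GeneratedBody));
  f.Call();  // Mirrors GeneratedCode<void>::FromCode(*code).Call() in RUN().
  return 0;
}

Usage-wise this mirrors the `auto f = GeneratedCode<void>::FromCode(*code); f.Call();` sequence in the non-simulator RUN(), with the signature checked at compile time instead of through an unchecked cast at the call site.
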
@@ -227,6 +231,9 @@ static void InitializeVM() {
#define CHECK_EQUAL_64(expected, result) \
CHECK(Equal64(expected, &core, result))
+#define CHECK_FULL_HEAP_OBJECT_IN_REGISTER(expected, result) \
+ CHECK(Equal64(expected->ptr(), &core, result))
+
#define CHECK_EQUAL_FP64(expected, result) \
CHECK(EqualFP64(expected, &core, result))
@@ -370,7 +377,7 @@ TEST(mov) {
__ Mov(w13, Operand(w11, LSL, 1));
__ Mov(x14, Operand(x12, LSL, 2));
__ Mov(w15, Operand(w11, LSR, 3));
- __ Mov(x18, Operand(x12, LSR, 4));
+ __ Mov(x28, Operand(x12, LSR, 4));
__ Mov(w19, Operand(w11, ASR, 11));
__ Mov(x20, Operand(x12, ASR, 12));
__ Mov(w21, Operand(w11, ROR, 13));
@@ -399,7 +406,7 @@ TEST(mov) {
CHECK_EQUAL_64(0x00001FFE, x13);
CHECK_EQUAL_64(0x0000000000003FFCUL, x14);
CHECK_EQUAL_64(0x000001FF, x15);
- CHECK_EQUAL_64(0x00000000000000FFUL, x18);
+ CHECK_EQUAL_64(0x00000000000000FFUL, x28);
CHECK_EQUAL_64(0x00000001, x19);
CHECK_EQUAL_64(0x0, x20);
CHECK_EQUAL_64(0x7FF80000, x21);
@@ -517,7 +524,7 @@ TEST(mov_imm_x) {
__ Mov(x13, 0x0000000000001234L);
__ Mov(x14, 0x0000000012345678L);
__ Mov(x15, 0x0000123400005678L);
- __ Mov(x18, 0x1234000000005678L);
+ __ Mov(x30, 0x1234000000005678L);
__ Mov(x19, 0x1234000056780000L);
__ Mov(x20, 0x1234567800000000L);
__ Mov(x21, 0x1234000000000000L);
@@ -547,7 +554,7 @@ TEST(mov_imm_x) {
CHECK_EQUAL_64(0x0000000000001234L, x13);
CHECK_EQUAL_64(0x0000000012345678L, x14);
CHECK_EQUAL_64(0x0000123400005678L, x15);
- CHECK_EQUAL_64(0x1234000000005678L, x18);
+ CHECK_EQUAL_64(0x1234000000005678L, x30);
CHECK_EQUAL_64(0x1234000056780000L, x19);
CHECK_EQUAL_64(0x1234567800000000L, x20);
CHECK_EQUAL_64(0x1234000000000000L, x21);
@@ -1095,27 +1102,27 @@ TEST(mul) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x15, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mul(w0, w16, w16);
__ Mul(w1, w16, w17);
- __ Mul(w2, w17, w18);
- __ Mul(w3, w18, w19);
+ __ Mul(w2, w17, w15);
+ __ Mul(w3, w15, w19);
__ Mul(x4, x16, x16);
- __ Mul(x5, x17, x18);
- __ Mul(x6, x18, x19);
+ __ Mul(x5, x17, x15);
+ __ Mul(x6, x15, x19);
__ Mul(x7, x19, x19);
- __ Smull(x8, w17, w18);
- __ Smull(x9, w18, w18);
+ __ Smull(x8, w17, w15);
+ __ Smull(x9, w15, w15);
__ Smull(x10, w19, w19);
__ Mneg(w11, w16, w16);
__ Mneg(w12, w16, w17);
- __ Mneg(w13, w17, w18);
- __ Mneg(w14, w18, w19);
+ __ Mneg(w13, w17, w15);
+ __ Mneg(w14, w15, w19);
__ Mneg(x20, x16, x16);
- __ Mneg(x21, x17, x18);
- __ Mneg(x22, x18, x19);
+ __ Mneg(x21, x17, x15);
+ __ Mneg(x22, x15, x19);
__ Mneg(x23, x19, x19);
END();
@@ -1170,33 +1177,33 @@ TEST(madd) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Madd(w0, w16, w16, w16);
__ Madd(w1, w16, w16, w17);
- __ Madd(w2, w16, w16, w18);
+ __ Madd(w2, w16, w16, w28);
__ Madd(w3, w16, w16, w19);
__ Madd(w4, w16, w17, w17);
- __ Madd(w5, w17, w17, w18);
+ __ Madd(w5, w17, w17, w28);
__ Madd(w6, w17, w17, w19);
- __ Madd(w7, w17, w18, w16);
- __ Madd(w8, w17, w18, w18);
- __ Madd(w9, w18, w18, w17);
- __ Madd(w10, w18, w19, w18);
+ __ Madd(w7, w17, w28, w16);
+ __ Madd(w8, w17, w28, w28);
+ __ Madd(w9, w28, w28, w17);
+ __ Madd(w10, w28, w19, w28);
__ Madd(w11, w19, w19, w19);
__ Madd(x12, x16, x16, x16);
__ Madd(x13, x16, x16, x17);
- __ Madd(x14, x16, x16, x18);
+ __ Madd(x14, x16, x16, x28);
__ Madd(x15, x16, x16, x19);
__ Madd(x20, x16, x17, x17);
- __ Madd(x21, x17, x17, x18);
+ __ Madd(x21, x17, x17, x28);
__ Madd(x22, x17, x17, x19);
- __ Madd(x23, x17, x18, x16);
- __ Madd(x24, x17, x18, x18);
- __ Madd(x25, x18, x18, x17);
- __ Madd(x26, x18, x19, x18);
+ __ Madd(x23, x17, x28, x16);
+ __ Madd(x24, x17, x28, x28);
+ __ Madd(x25, x28, x28, x17);
+ __ Madd(x26, x28, x19, x28);
__ Madd(x27, x19, x19, x19);
END();
@@ -1237,33 +1244,33 @@ TEST(msub) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Msub(w0, w16, w16, w16);
__ Msub(w1, w16, w16, w17);
- __ Msub(w2, w16, w16, w18);
+ __ Msub(w2, w16, w16, w28);
__ Msub(w3, w16, w16, w19);
__ Msub(w4, w16, w17, w17);
- __ Msub(w5, w17, w17, w18);
+ __ Msub(w5, w17, w17, w28);
__ Msub(w6, w17, w17, w19);
- __ Msub(w7, w17, w18, w16);
- __ Msub(w8, w17, w18, w18);
- __ Msub(w9, w18, w18, w17);
- __ Msub(w10, w18, w19, w18);
+ __ Msub(w7, w17, w28, w16);
+ __ Msub(w8, w17, w28, w28);
+ __ Msub(w9, w28, w28, w17);
+ __ Msub(w10, w28, w19, w28);
__ Msub(w11, w19, w19, w19);
__ Msub(x12, x16, x16, x16);
__ Msub(x13, x16, x16, x17);
- __ Msub(x14, x16, x16, x18);
+ __ Msub(x14, x16, x16, x28);
__ Msub(x15, x16, x16, x19);
__ Msub(x20, x16, x17, x17);
- __ Msub(x21, x17, x17, x18);
+ __ Msub(x21, x17, x17, x28);
__ Msub(x22, x17, x17, x19);
- __ Msub(x23, x17, x18, x16);
- __ Msub(x24, x17, x18, x18);
- __ Msub(x25, x18, x18, x17);
- __ Msub(x26, x18, x19, x18);
+ __ Msub(x23, x17, x28, x16);
+ __ Msub(x24, x17, x28, x28);
+ __ Msub(x25, x28, x28, x17);
+ __ Msub(x26, x28, x19, x28);
__ Msub(x27, x19, x19, x19);
END();
@@ -1349,17 +1356,17 @@ TEST(smaddl_umaddl) {
START();
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
- __ Smaddl(x9, w17, w18, x20);
- __ Smaddl(x10, w18, w18, x20);
+ __ Smaddl(x9, w17, w28, x20);
+ __ Smaddl(x10, w28, w28, x20);
__ Smaddl(x11, w19, w19, x20);
__ Smaddl(x12, w19, w19, x21);
- __ Umaddl(x13, w17, w18, x20);
- __ Umaddl(x14, w18, w18, x20);
+ __ Umaddl(x13, w17, w28, x20);
+ __ Umaddl(x14, w28, w28, x20);
__ Umaddl(x15, w19, w19, x20);
__ Umaddl(x22, w19, w19, x21);
END();
@@ -1382,17 +1389,17 @@ TEST(smsubl_umsubl) {
START();
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
- __ Smsubl(x9, w17, w18, x20);
- __ Smsubl(x10, w18, w18, x20);
+ __ Smsubl(x9, w17, w28, x20);
+ __ Smsubl(x10, w28, w28, x20);
__ Smsubl(x11, w19, w19, x20);
__ Smsubl(x12, w19, w19, x21);
- __ Umsubl(x13, w17, w18, x20);
- __ Umsubl(x14, w18, w18, x20);
+ __ Umsubl(x13, w17, w28, x20);
+ __ Umsubl(x14, w28, w28, x20);
__ Umsubl(x15, w19, w19, x20);
__ Umsubl(x22, w19, w19, x21);
END();
@@ -1416,7 +1423,7 @@ TEST(div) {
START();
__ Mov(x16, 1);
__ Mov(x17, 0xFFFFFFFF);
- __ Mov(x18, 0xFFFFFFFFFFFFFFFFUL);
+ __ Mov(x30, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x19, 0x80000000);
__ Mov(x20, 0x8000000000000000UL);
__ Mov(x21, 2);
@@ -1425,13 +1432,13 @@ TEST(div) {
__ Udiv(w1, w17, w16);
__ Sdiv(w2, w16, w16);
__ Sdiv(w3, w16, w17);
- __ Sdiv(w4, w17, w18);
+ __ Sdiv(w4, w17, w30);
__ Udiv(x5, x16, x16);
- __ Udiv(x6, x17, x18);
+ __ Udiv(x6, x17, x30);
__ Sdiv(x7, x16, x16);
__ Sdiv(x8, x16, x17);
- __ Sdiv(x9, x17, x18);
+ __ Sdiv(x9, x17, x30);
__ Udiv(w10, w19, w21);
__ Sdiv(w11, w19, w21);
@@ -1442,16 +1449,16 @@ TEST(div) {
__ Udiv(w22, w19, w17);
__ Sdiv(w23, w19, w17);
- __ Udiv(x24, x20, x18);
- __ Sdiv(x25, x20, x18);
+ __ Udiv(x24, x20, x30);
+ __ Sdiv(x25, x20, x30);
__ Udiv(x26, x16, x21);
__ Sdiv(x27, x16, x21);
- __ Udiv(x28, x18, x21);
- __ Sdiv(x29, x18, x21);
+ __ Udiv(x28, x30, x21);
+ __ Sdiv(x29, x30, x21);
__ Mov(x17, 0);
- __ Udiv(w18, w16, w17);
+ __ Udiv(w30, w16, w17);
__ Sdiv(w19, w16, w17);
__ Udiv(x20, x16, x17);
__ Sdiv(x21, x16, x17);
@@ -1483,7 +1490,7 @@ TEST(div) {
CHECK_EQUAL_64(0, x27);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x28);
CHECK_EQUAL_64(0, x29);
- CHECK_EQUAL_64(0, x18);
+ CHECK_EQUAL_64(0, x30);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0, x20);
CHECK_EQUAL_64(0, x21);
@@ -1899,17 +1906,17 @@ TEST(compare_branch) {
__ Mov(x3, 1);
__ Bind(&nzf_end);
- __ Mov(x18, 0xFFFFFFFF00000000UL);
+ __ Mov(x19, 0xFFFFFFFF00000000UL);
Label a, a_end;
- __ Cbz(w18, &a);
+ __ Cbz(w19, &a);
__ B(&a_end);
__ Bind(&a);
__ Mov(x4, 1);
__ Bind(&a_end);
Label b, b_end;
- __ Cbnz(w18, &b);
+ __ Cbnz(w19, &b);
__ B(&b_end);
__ Bind(&b);
__ Mov(x5, 1);
@@ -2357,17 +2364,17 @@ TEST(ldr_str_offset) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Ldr(w0, MemOperand(x17));
- __ Str(w0, MemOperand(x18));
+ __ Str(w0, MemOperand(x19));
__ Ldr(w1, MemOperand(x17, 4));
- __ Str(w1, MemOperand(x18, 12));
+ __ Str(w1, MemOperand(x19, 12));
__ Ldr(x2, MemOperand(x17, 8));
- __ Str(x2, MemOperand(x18, 16));
+ __ Str(x2, MemOperand(x19, 16));
__ Ldrb(w3, MemOperand(x17, 1));
- __ Strb(w3, MemOperand(x18, 25));
+ __ Strb(w3, MemOperand(x19, 25));
__ Ldrh(w4, MemOperand(x17, 2));
- __ Strh(w4, MemOperand(x18, 33));
+ __ Strh(w4, MemOperand(x19, 33));
END();
RUN();
@@ -2383,7 +2390,7 @@ TEST(ldr_str_offset) {
CHECK_EQUAL_64(0x7654, x4);
CHECK_EQUAL_64(0x765400, dst[4]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base, x18);
+ CHECK_EQUAL_64(dst_base, x19);
}
TEST(ldr_str_wide) {
@@ -2443,7 +2450,7 @@ TEST(ldr_str_preindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base + 16);
@@ -2453,7 +2460,7 @@ TEST(ldr_str_preindex) {
__ Mov(x25, src_base);
__ Mov(x26, dst_base);
__ Ldr(w0, MemOperand(x17, 4, PreIndex));
- __ Str(w0, MemOperand(x18, 12, PreIndex));
+ __ Str(w0, MemOperand(x28, 12, PreIndex));
__ Ldr(x1, MemOperand(x19, 8, PreIndex));
__ Str(x1, MemOperand(x20, 16, PreIndex));
__ Ldr(w2, MemOperand(x21, -4, PreIndex));
@@ -2477,7 +2484,7 @@ TEST(ldr_str_preindex) {
CHECK_EQUAL_64(0x9876, x4);
CHECK_EQUAL_64(0x987600, dst[5]);
CHECK_EQUAL_64(src_base + 4, x17);
- CHECK_EQUAL_64(dst_base + 12, x18);
+ CHECK_EQUAL_64(dst_base + 12, x28);
CHECK_EQUAL_64(src_base + 8, x19);
CHECK_EQUAL_64(dst_base + 16, x20);
CHECK_EQUAL_64(src_base + 12, x21);
@@ -2499,7 +2506,7 @@ TEST(ldr_str_postindex) {
START();
__ Mov(x17, src_base + 4);
- __ Mov(x18, dst_base + 12);
+ __ Mov(x28, dst_base + 12);
__ Mov(x19, src_base + 8);
__ Mov(x20, dst_base + 16);
__ Mov(x21, src_base + 8);
@@ -2509,7 +2516,7 @@ TEST(ldr_str_postindex) {
__ Mov(x25, src_base + 3);
__ Mov(x26, dst_base + 41);
__ Ldr(w0, MemOperand(x17, 4, PostIndex));
- __ Str(w0, MemOperand(x18, 12, PostIndex));
+ __ Str(w0, MemOperand(x28, 12, PostIndex));
__ Ldr(x1, MemOperand(x19, 8, PostIndex));
__ Str(x1, MemOperand(x20, 16, PostIndex));
__ Ldr(x2, MemOperand(x21, -8, PostIndex));
@@ -2533,7 +2540,7 @@ TEST(ldr_str_postindex) {
CHECK_EQUAL_64(0x9876, x4);
CHECK_EQUAL_64(0x987600, dst[5]);
CHECK_EQUAL_64(src_base + 8, x17);
- CHECK_EQUAL_64(dst_base + 24, x18);
+ CHECK_EQUAL_64(dst_base + 24, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
CHECK_EQUAL_64(src_base, x21);
@@ -2591,7 +2598,7 @@ TEST(load_store_regoffset) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, src_base + 3 * sizeof(src[0]));
+ __ Mov(x21, src_base + 3 * sizeof(src[0]));
__ Mov(x19, dst_base + 3 * sizeof(dst[0]));
__ Mov(x20, dst_base + 4 * sizeof(dst[0]));
__ Mov(x24, 0);
@@ -2603,9 +2610,9 @@ TEST(load_store_regoffset) {
__ Ldr(w0, MemOperand(x16, x24));
__ Ldr(x1, MemOperand(x16, x25));
- __ Ldr(w2, MemOperand(x18, x26));
- __ Ldr(w3, MemOperand(x18, x27, SXTW));
- __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+ __ Ldr(w2, MemOperand(x21, x26));
+ __ Ldr(w3, MemOperand(x21, x27, SXTW));
+ __ Ldr(w4, MemOperand(x21, x28, SXTW, 2));
__ Str(w0, MemOperand(x17, x24));
__ Str(x1, MemOperand(x17, x25));
__ Str(w2, MemOperand(x20, x29, SXTW, 2));
@@ -2635,13 +2642,13 @@ TEST(load_store_float) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(s0, MemOperand(x17, sizeof(src[0])));
- __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(s0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2657,7 +2664,7 @@ TEST(load_store_float) {
CHECK_EQUAL_FP32(3.0, s2);
CHECK_EQUAL_FP32(3.0, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2675,13 +2682,13 @@ TEST(load_store_double) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(d0, MemOperand(x17, sizeof(src[0])));
- __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(d0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2697,7 +2704,7 @@ TEST(load_store_double) {
CHECK_EQUAL_FP64(3.0, d2);
CHECK_EQUAL_FP64(3.0, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2715,13 +2722,13 @@ TEST(load_store_b) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(b0, MemOperand(x17, sizeof(src[0])));
- __ Str(b0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(b0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(b1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(b1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(b2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2737,7 +2744,7 @@ TEST(load_store_b) {
CHECK_EQUAL_128(0, 0x34, q2);
CHECK_EQUAL_64(0x34, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2755,13 +2762,13 @@ TEST(load_store_h) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(h0, MemOperand(x17, sizeof(src[0])));
- __ Str(h0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(h0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(h1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(h1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(h2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2777,7 +2784,7 @@ TEST(load_store_h) {
CHECK_EQUAL_128(0, 0x3456, q2);
CHECK_EQUAL_64(0x3456, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2800,13 +2807,13 @@ TEST(load_store_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(q0, MemOperand(x17, 16));
- __ Str(q0, MemOperand(x18, 16, PostIndex));
+ __ Str(q0, MemOperand(x28, 16, PostIndex));
__ Ldr(q1, MemOperand(x19, 16, PostIndex));
__ Str(q1, MemOperand(x20, 32, PreIndex));
__ Ldr(q2, MemOperand(x21, 32, PreIndex));
@@ -2825,7 +2832,7 @@ TEST(load_store_q) {
CHECK_EQUAL_64(0x02E0CEAC8A684624, dst[2]);
CHECK_EQUAL_64(0x200EECCAA8866442, dst[3]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
CHECK_EQUAL_64(src_base + 32, x21);
@@ -2892,7 +2899,7 @@ TEST(neon_ld1_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
@@ -2900,7 +2907,7 @@ TEST(neon_ld1_d_postindex) {
__ Mov(x23, 1);
__ Ldr(q2, MemOperand(x17)); // Initialise top 64-bits of Q register.
__ Ld1(v2.V8B(), MemOperand(x17, x23, PostIndex));
- __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x18, 16, PostIndex));
+ __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x28, 16, PostIndex));
__ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));
__ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
MemOperand(x20, 32, PostIndex));
@@ -2931,7 +2938,7 @@ TEST(neon_ld1_d_postindex) {
CHECK_EQUAL_128(0, 0x1C1B1A1918171615, q22);
CHECK_EQUAL_128(0, 0x24232221201F1E1D, q23);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 16, x18);
+ CHECK_EQUAL_64(src_base + 1 + 16, x28);
CHECK_EQUAL_64(src_base + 2 + 24, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -2991,13 +2998,13 @@ TEST(neon_ld1_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));
- __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));
+ __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x28, 32, PostIndex));
__ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x19, 48, PostIndex));
__ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(),
MemOperand(x20, 64, PostIndex));
@@ -3022,7 +3029,7 @@ TEST(neon_ld1_q_postindex) {
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x2B2A292827262524, q0);
CHECK_EQUAL_128(0x434241403F3E3D3C, 0x3B3A393837363534, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 32, x18);
+ CHECK_EQUAL_64(src_base + 1 + 32, x28);
CHECK_EQUAL_64(src_base + 2 + 48, x19);
CHECK_EQUAL_64(src_base + 3 + 64, x20);
CHECK_EQUAL_64(src_base + 4 + 64, x21);
@@ -3135,13 +3142,13 @@ TEST(neon_ld2_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld2(v2.V8B(), v3.V8B(), MemOperand(x17, x22, PostIndex));
- __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x18, 16, PostIndex));
+ __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x28, 16, PostIndex));
__ Ld2(v5.V4H(), v6.V4H(), MemOperand(x19, 16, PostIndex));
__ Ld2(v16.V2S(), v17.V2S(), MemOperand(x20, 16, PostIndex));
__ Ld2(v31.V2S(), v0.V2S(), MemOperand(x21, 16, PostIndex));
@@ -3160,7 +3167,7 @@ TEST(neon_ld2_d_postindex) {
CHECK_EQUAL_128(0, 0x131211100B0A0908, q0);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 16, x18);
+ CHECK_EQUAL_64(src_base + 1 + 16, x28);
CHECK_EQUAL_64(src_base + 2 + 16, x19);
CHECK_EQUAL_64(src_base + 3 + 16, x20);
CHECK_EQUAL_64(src_base + 4 + 16, x21);
@@ -3215,13 +3222,13 @@ TEST(neon_ld2_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));
- __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));
+ __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x28, 32, PostIndex));
__ Ld2(v6.V8H(), v7.V8H(), MemOperand(x19, 32, PostIndex));
__ Ld2(v16.V4S(), v17.V4S(), MemOperand(x20, 32, PostIndex));
__ Ld2(v31.V2D(), v0.V2D(), MemOperand(x21, 32, PostIndex));
@@ -3241,7 +3248,7 @@ TEST(neon_ld2_q_postindex) {
CHECK_EQUAL_128(0x232221201F1E1D1C, 0x131211100F0E0D0C, q0);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 32, x18);
+ CHECK_EQUAL_64(src_base + 1 + 32, x28);
CHECK_EQUAL_64(src_base + 2 + 32, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -3337,7 +3344,7 @@ TEST(neon_ld2_lane_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Mov(x19, src_base);
__ Mov(x20, src_base);
__ Mov(x21, src_base);
@@ -3351,7 +3358,7 @@ TEST(neon_ld2_lane_postindex) {
}
for (int i = 7; i >= 0; i--) {
- __ Ld2(v2.H(), v3.H(), i, MemOperand(x18, 4, PostIndex));
+ __ Ld2(v2.H(), v3.H(), i, MemOperand(x28, 4, PostIndex));
}
for (int i = 3; i >= 0; i--) {
@@ -3409,7 +3416,7 @@ TEST(neon_ld2_lane_postindex) {
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q15);
CHECK_EQUAL_64(src_base + 32, x17);
- CHECK_EQUAL_64(src_base + 32, x18);
+ CHECK_EQUAL_64(src_base + 32, x28);
CHECK_EQUAL_64(src_base + 32, x19);
CHECK_EQUAL_64(src_base + 32, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -3430,7 +3437,6 @@ TEST(neon_ld2_alllanes) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
__ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17));
__ Add(x17, x17, 2);
__ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));
@@ -3476,12 +3482,12 @@ TEST(neon_ld2_alllanes_postindex) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17, 2, PostIndex));
- __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x18, PostIndex));
- __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x18, PostIndex));
+ __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x19, PostIndex));
+ __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x19, PostIndex));
__ Ld2r(v6.V8H(), v7.V8H(), MemOperand(x17, 4, PostIndex));
- __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x18, PostIndex));
+ __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x19, PostIndex));
__ Ld2r(v10.V4S(), v11.V4S(), MemOperand(x17, 8, PostIndex));
__ Ld2r(v12.V2D(), v13.V2D(), MemOperand(x17, 16, PostIndex));
END();
@@ -3554,13 +3560,13 @@ TEST(neon_ld3_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld3(v2.V8B(), v3.V8B(), v4.V8B(), MemOperand(x17, x22, PostIndex));
- __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x18, 24, PostIndex));
+ __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x28, 24, PostIndex));
__ Ld3(v8_.V4H(), v9.V4H(), v10.V4H(), MemOperand(x19, 24, PostIndex));
__ Ld3(v11.V2S(), v12.V2S(), v13.V2S(), MemOperand(x20, 24, PostIndex));
__ Ld3(v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x21, 24, PostIndex));
@@ -3585,7 +3591,7 @@ TEST(neon_ld3_d_postindex) {
CHECK_EQUAL_128(0, 0x1B1A19180F0E0D0C, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 24, x18);
+ CHECK_EQUAL_64(src_base + 1 + 24, x28);
CHECK_EQUAL_64(src_base + 2 + 24, x19);
CHECK_EQUAL_64(src_base + 3 + 24, x20);
CHECK_EQUAL_64(src_base + 4 + 24, x21);
@@ -3645,14 +3651,14 @@ TEST(neon_ld3_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld3(v2.V16B(), v3.V16B(), v4.V16B(), MemOperand(x17, x22, PostIndex));
- __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x18, 48, PostIndex));
+ __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x28, 48, PostIndex));
__ Ld3(v8_.V8H(), v9.V8H(), v10.V8H(), MemOperand(x19, 48, PostIndex));
__ Ld3(v11.V4S(), v12.V4S(), v13.V4S(), MemOperand(x20, 48, PostIndex));
__ Ld3(v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x21, 48, PostIndex));
@@ -3677,7 +3683,7 @@ TEST(neon_ld3_q_postindex) {
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x1B1A191817161514, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 48, x18);
+ CHECK_EQUAL_64(src_base + 1 + 48, x28);
CHECK_EQUAL_64(src_base + 2 + 48, x19);
CHECK_EQUAL_64(src_base + 3 + 48, x20);
CHECK_EQUAL_64(src_base + 4 + 48, x21);
@@ -3781,7 +3787,7 @@ TEST(neon_ld3_lane_postindex) {
// Test loading whole register by element.
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Mov(x19, src_base);
__ Mov(x20, src_base);
__ Mov(x21, src_base);
@@ -3793,7 +3799,7 @@ TEST(neon_ld3_lane_postindex) {
}
for (int i = 7; i >= 0; i--) {
- __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x18, 6, PostIndex));
+ __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x28, 6, PostIndex));
}
for (int i = 3; i >= 0; i--) {
@@ -3863,7 +3869,7 @@ TEST(neon_ld3_lane_postindex) {
CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q23);
CHECK_EQUAL_64(src_base + 48, x17);
- CHECK_EQUAL_64(src_base + 48, x18);
+ CHECK_EQUAL_64(src_base + 48, x28);
CHECK_EQUAL_64(src_base + 48, x19);
CHECK_EQUAL_64(src_base + 48, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -3884,7 +3890,6 @@ TEST(neon_ld3_alllanes) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
__ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17));
__ Add(x17, x17, 3);
__ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17));
@@ -3934,17 +3939,15 @@ TEST(neon_ld3_alllanes_postindex) {
src[i] = i;
}
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
- __ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17, 3, PostIndex));
- __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x18, PostIndex));
- __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x18, PostIndex));
+ __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x19, PostIndex));
+ __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x19, PostIndex));
__ Ld3r(v9.V8H(), v10.V8H(), v11.V8H(), MemOperand(x17, 6, PostIndex));
- __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x18, PostIndex));
+ __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x19, PostIndex));
__ Ld3r(v15.V4S(), v16.V4S(), v17.V4S(), MemOperand(x17, 12, PostIndex));
__ Ld3r(v18.V2D(), v19.V2D(), v20.V2D(), MemOperand(x17, 24, PostIndex));
END();
@@ -4027,7 +4030,7 @@ TEST(neon_ld4_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
@@ -4035,7 +4038,7 @@ TEST(neon_ld4_d_postindex) {
__ Ld4(v2.V8B(), v3.V8B(), v4.V8B(), v5.V8B(),
MemOperand(x17, x22, PostIndex));
__ Ld4(v6.V8B(), v7.V8B(), v8_.V8B(), v9.V8B(),
- MemOperand(x18, 32, PostIndex));
+ MemOperand(x28, 32, PostIndex));
__ Ld4(v10.V4H(), v11.V4H(), v12.V4H(), v13.V4H(),
MemOperand(x19, 32, PostIndex));
__ Ld4(v14.V2S(), v15.V2S(), v16.V2S(), v17.V2S(),
@@ -4068,7 +4071,7 @@ TEST(neon_ld4_d_postindex) {
CHECK_EQUAL_128(0, 0x2322212013121110, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 32, x18);
+ CHECK_EQUAL_64(src_base + 1 + 32, x28);
CHECK_EQUAL_64(src_base + 2 + 32, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -4133,7 +4136,7 @@ TEST(neon_ld4_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
@@ -4142,7 +4145,7 @@ TEST(neon_ld4_q_postindex) {
__ Ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(),
MemOperand(x17, x22, PostIndex));
__ Ld4(v6.V16B(), v7.V16B(), v8_.V16B(), v9.V16B(),
- MemOperand(x18, 64, PostIndex));
+ MemOperand(x28, 64, PostIndex));
__ Ld4(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(),
MemOperand(x19, 64, PostIndex));
__ Ld4(v14.V4S(), v15.V4S(), v16.V4S(), v17.V4S(),
@@ -4175,7 +4178,7 @@ TEST(neon_ld4_q_postindex) {
CHECK_EQUAL_128(0x434241403F3E3D3C, 0x232221201F1E1D1C, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 64, x18);
+ CHECK_EQUAL_64(src_base + 1 + 64, x28);
CHECK_EQUAL_64(src_base + 2 + 64, x19);
CHECK_EQUAL_64(src_base + 3 + 64, x20);
CHECK_EQUAL_64(src_base + 4 + 64, x21);
@@ -4304,9 +4307,9 @@ TEST(neon_ld4_lane_postindex) {
__ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x17, 4, PostIndex));
}
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
for (int i = 7; i >= 0; i--) {
- __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x18, 8, PostIndex));
+ __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x28, 8, PostIndex));
}
__ Mov(x19, src_base);
@@ -4401,7 +4404,7 @@ TEST(neon_ld4_lane_postindex) {
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3736353433323130, q31);
CHECK_EQUAL_64(src_base + 64, x17);
- CHECK_EQUAL_64(src_base + 64, x18);
+ CHECK_EQUAL_64(src_base + 64, x28);
CHECK_EQUAL_64(src_base + 64, x19);
CHECK_EQUAL_64(src_base + 64, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -4422,7 +4425,6 @@ TEST(neon_ld4_alllanes) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
__ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x17));
__ Add(x17, x17, 4);
__ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x17));
@@ -4480,22 +4482,20 @@ TEST(neon_ld4_alllanes_postindex) {
src[i] = i;
}
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
- __ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
MemOperand(x17, 4, PostIndex));
__ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(),
- MemOperand(x17, x18, PostIndex));
+ MemOperand(x17, x19, PostIndex));
__ Ld4r(v8_.V4H(), v9.V4H(), v10.V4H(), v11.V4H(),
- MemOperand(x17, x18, PostIndex));
+ MemOperand(x17, x19, PostIndex));
__ Ld4r(v12.V8H(), v13.V8H(), v14.V8H(), v15.V8H(),
MemOperand(x17, 8, PostIndex));
__ Ld4r(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
- MemOperand(x17, x18, PostIndex));
+ MemOperand(x17, x19, PostIndex));
__ Ld4r(v20.V4S(), v21.V4S(), v22.V4S(), v23.V4S(),
MemOperand(x17, 16, PostIndex));
__ Ld4r(v24.V2D(), v25.V2D(), v26.V2D(), v27.V2D(),
@@ -4547,32 +4547,32 @@ TEST(neon_st1_lane) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -16);
+ __ Mov(x19, -16);
__ Ldr(q0, MemOperand(x17));
for (int i = 15; i >= 0; i--) {
__ St1(v0.B(), i, MemOperand(x17));
__ Add(x17, x17, 1);
}
- __ Ldr(q1, MemOperand(x17, x18));
+ __ Ldr(q1, MemOperand(x17, x19));
for (int i = 7; i >= 0; i--) {
__ St1(v0.H(), i, MemOperand(x17));
__ Add(x17, x17, 2);
}
- __ Ldr(q2, MemOperand(x17, x18));
+ __ Ldr(q2, MemOperand(x17, x19));
for (int i = 3; i >= 0; i--) {
__ St1(v0.S(), i, MemOperand(x17));
__ Add(x17, x17, 4);
}
- __ Ldr(q3, MemOperand(x17, x18));
+ __ Ldr(q3, MemOperand(x17, x19));
for (int i = 1; i >= 0; i--) {
__ St1(v0.D(), i, MemOperand(x17));
__ Add(x17, x17, 8);
}
- __ Ldr(q4, MemOperand(x17, x18));
+ __ Ldr(q4, MemOperand(x17, x19));
END();
@@ -4595,17 +4595,17 @@ TEST(neon_st2_lane) {
START();
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
// Test B stores with and without post index.
for (int i = 15; i >= 0; i--) {
- __ St2(v0.B(), v1.B(), i, MemOperand(x18));
- __ Add(x18, x18, 2);
+ __ St2(v0.B(), v1.B(), i, MemOperand(x19));
+ __ Add(x19, x19, 2);
}
for (int i = 15; i >= 0; i--) {
- __ St2(v0.B(), v1.B(), i, MemOperand(x18, 2, PostIndex));
+ __ St2(v0.B(), v1.B(), i, MemOperand(x19, 2, PostIndex));
}
__ Ldr(q2, MemOperand(x17, 0 * 16));
__ Ldr(q3, MemOperand(x17, 1 * 16));
@@ -4615,11 +4615,11 @@ TEST(neon_st2_lane) {
// Test H stores with and without post index.
__ Mov(x0, 4);
for (int i = 7; i >= 0; i--) {
- __ St2(v0.H(), v1.H(), i, MemOperand(x18));
- __ Add(x18, x18, 4);
+ __ St2(v0.H(), v1.H(), i, MemOperand(x19));
+ __ Add(x19, x19, 4);
}
for (int i = 7; i >= 0; i--) {
- __ St2(v0.H(), v1.H(), i, MemOperand(x18, x0, PostIndex));
+ __ St2(v0.H(), v1.H(), i, MemOperand(x19, x0, PostIndex));
}
__ Ldr(q6, MemOperand(x17, 4 * 16));
__ Ldr(q7, MemOperand(x17, 5 * 16));
@@ -4628,11 +4628,11 @@ TEST(neon_st2_lane) {
// Test S stores with and without post index.
for (int i = 3; i >= 0; i--) {
- __ St2(v0.S(), v1.S(), i, MemOperand(x18));
- __ Add(x18, x18, 8);
+ __ St2(v0.S(), v1.S(), i, MemOperand(x19));
+ __ Add(x19, x19, 8);
}
for (int i = 3; i >= 0; i--) {
- __ St2(v0.S(), v1.S(), i, MemOperand(x18, 8, PostIndex));
+ __ St2(v0.S(), v1.S(), i, MemOperand(x19, 8, PostIndex));
}
__ Ldr(q18, MemOperand(x17, 8 * 16));
__ Ldr(q19, MemOperand(x17, 9 * 16));
@@ -4641,11 +4641,11 @@ TEST(neon_st2_lane) {
// Test D stores with and without post index.
__ Mov(x0, 16);
- __ St2(v0.D(), v1.D(), 1, MemOperand(x18));
- __ Add(x18, x18, 16);
- __ St2(v0.D(), v1.D(), 0, MemOperand(x18, 16, PostIndex));
- __ St2(v0.D(), v1.D(), 1, MemOperand(x18, x0, PostIndex));
- __ St2(v0.D(), v1.D(), 0, MemOperand(x18, x0, PostIndex));
+ __ St2(v0.D(), v1.D(), 1, MemOperand(x19));
+ __ Add(x19, x19, 16);
+ __ St2(v0.D(), v1.D(), 0, MemOperand(x19, 16, PostIndex));
+ __ St2(v0.D(), v1.D(), 1, MemOperand(x19, x0, PostIndex));
+ __ St2(v0.D(), v1.D(), 0, MemOperand(x19, x0, PostIndex));
__ Ldr(q22, MemOperand(x17, 12 * 16));
__ Ldr(q23, MemOperand(x17, 13 * 16));
__ Ldr(q24, MemOperand(x17, 14 * 16));
@@ -4686,18 +4686,18 @@ TEST(neon_st3_lane) {
START();
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
__ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
// Test B stores with and without post index.
for (int i = 15; i >= 0; i--) {
- __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18));
- __ Add(x18, x18, 3);
+ __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x19));
+ __ Add(x19, x19, 3);
}
for (int i = 15; i >= 0; i--) {
- __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18, 3, PostIndex));
+ __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x19, 3, PostIndex));
}
__ Ldr(q3, MemOperand(x17, 0 * 16));
__ Ldr(q4, MemOperand(x17, 1 * 16));
@@ -4709,11 +4709,11 @@ TEST(neon_st3_lane) {
// Test H stores with and without post index.
__ Mov(x0, 6);
for (int i = 7; i >= 0; i--) {
- __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18));
- __ Add(x18, x18, 6);
+ __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x19));
+ __ Add(x19, x19, 6);
}
for (int i = 7; i >= 0; i--) {
- __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18, x0, PostIndex));
+ __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x19, x0, PostIndex));
}
__ Ldr(q17, MemOperand(x17, 6 * 16));
__ Ldr(q18, MemOperand(x17, 7 * 16));
@@ -4724,11 +4724,11 @@ TEST(neon_st3_lane) {
// Test S stores with and without post index.
for (int i = 3; i >= 0; i--) {
- __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18));
- __ Add(x18, x18, 12);
+ __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x19));
+ __ Add(x19, x19, 12);
}
for (int i = 3; i >= 0; i--) {
- __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18, 12, PostIndex));
+ __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x19, 12, PostIndex));
}
__ Ldr(q23, MemOperand(x17, 12 * 16));
__ Ldr(q24, MemOperand(x17, 13 * 16));
@@ -4739,10 +4739,10 @@ TEST(neon_st3_lane) {
// Test D stores with and without post index.
__ Mov(x0, 24);
- __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18));
- __ Add(x18, x18, 24);
- __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x18, 24, PostIndex));
- __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18, x0, PostIndex));
+ __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x19));
+ __ Add(x19, x19, 24);
+ __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x19, 24, PostIndex));
+ __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x19, x0, PostIndex));
__ Ldr(q29, MemOperand(x17, 18 * 16));
__ Ldr(q30, MemOperand(x17, 19 * 16));
__ Ldr(q31, MemOperand(x17, 20 * 16));
@@ -4783,7 +4783,7 @@ TEST(neon_st4_lane) {
START();
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
__ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
@@ -4791,8 +4791,8 @@ TEST(neon_st4_lane) {
// Test B stores without post index.
for (int i = 15; i >= 0; i--) {
- __ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x18));
- __ Add(x18, x18, 4);
+ __ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x19));
+ __ Add(x19, x19, 4);
}
__ Ldr(q4, MemOperand(x17, 0 * 16));
__ Ldr(q5, MemOperand(x17, 1 * 16));
@@ -4802,7 +4802,7 @@ TEST(neon_st4_lane) {
// Test H stores with post index.
__ Mov(x0, 8);
for (int i = 7; i >= 0; i--) {
- __ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x18, x0, PostIndex));
+ __ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x19, x0, PostIndex));
}
__ Ldr(q16, MemOperand(x17, 4 * 16));
__ Ldr(q17, MemOperand(x17, 5 * 16));
@@ -4811,8 +4811,8 @@ TEST(neon_st4_lane) {
// Test S stores without post index.
for (int i = 3; i >= 0; i--) {
- __ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x18));
- __ Add(x18, x18, 16);
+ __ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x19));
+ __ Add(x19, x19, 16);
}
__ Ldr(q20, MemOperand(x17, 8 * 16));
__ Ldr(q21, MemOperand(x17, 9 * 16));
@@ -4821,8 +4821,8 @@ TEST(neon_st4_lane) {
// Test D stores with post index.
__ Mov(x0, 32);
- __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x18, 32, PostIndex));
- __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x18, x0, PostIndex));
+ __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x19, 32, PostIndex));
+ __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x19, x0, PostIndex));
__ Ldr(q24, MemOperand(x17, 12 * 16));
__ Ldr(q25, MemOperand(x17, 13 * 16));
@@ -4865,7 +4865,7 @@ TEST(neon_ld1_lane_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Mov(x19, src_base);
__ Mov(x20, src_base);
__ Mov(x21, src_base);
@@ -4879,7 +4879,7 @@ TEST(neon_ld1_lane_postindex) {
}
for (int i = 7; i >= 0; i--) {
- __ Ld1(v1.H(), i, MemOperand(x18, 2, PostIndex));
+ __ Ld1(v1.H(), i, MemOperand(x28, 2, PostIndex));
}
for (int i = 3; i >= 0; i--) {
@@ -4920,7 +4920,7 @@ TEST(neon_ld1_lane_postindex) {
CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q6);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
CHECK_EQUAL_64(src_base + 16, x17);
- CHECK_EQUAL_64(src_base + 16, x18);
+ CHECK_EQUAL_64(src_base + 16, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(src_base + 16, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -4941,28 +4941,28 @@ TEST(neon_st1_lane_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -16);
+ __ Mov(x19, -16);
__ Ldr(q0, MemOperand(x17));
for (int i = 15; i >= 0; i--) {
__ St1(v0.B(), i, MemOperand(x17, 1, PostIndex));
}
- __ Ldr(q1, MemOperand(x17, x18));
+ __ Ldr(q1, MemOperand(x17, x19));
for (int i = 7; i >= 0; i--) {
__ St1(v0.H(), i, MemOperand(x17, 2, PostIndex));
}
- __ Ldr(q2, MemOperand(x17, x18));
+ __ Ldr(q2, MemOperand(x17, x19));
for (int i = 3; i >= 0; i--) {
__ St1(v0.S(), i, MemOperand(x17, 4, PostIndex));
}
- __ Ldr(q3, MemOperand(x17, x18));
+ __ Ldr(q3, MemOperand(x17, x19));
for (int i = 1; i >= 0; i--) {
__ St1(v0.D(), i, MemOperand(x17, 8, PostIndex));
}
- __ Ldr(q4, MemOperand(x17, x18));
+ __ Ldr(q4, MemOperand(x17, x19));
END();
@@ -5027,12 +5027,12 @@ TEST(neon_ld1_alllanes_postindex) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld1r(v0.V8B(), MemOperand(x17, 1, PostIndex));
- __ Ld1r(v1.V16B(), MemOperand(x17, x18, PostIndex));
- __ Ld1r(v2.V4H(), MemOperand(x17, x18, PostIndex));
+ __ Ld1r(v1.V16B(), MemOperand(x17, x19, PostIndex));
+ __ Ld1r(v2.V4H(), MemOperand(x17, x19, PostIndex));
__ Ld1r(v3.V8H(), MemOperand(x17, 2, PostIndex));
- __ Ld1r(v4.V2S(), MemOperand(x17, x18, PostIndex));
+ __ Ld1r(v4.V2S(), MemOperand(x17, x19, PostIndex));
__ Ld1r(v5.V4S(), MemOperand(x17, 4, PostIndex));
__ Ld1r(v6.V2D(), MemOperand(x17, 8, PostIndex));
END();
@@ -5116,7 +5116,7 @@ TEST(neon_st1_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -8);
+ __ Mov(x28, -8);
__ Mov(x19, -16);
__ Mov(x20, -24);
__ Mov(x21, -32);
@@ -5127,7 +5127,7 @@ TEST(neon_st1_d_postindex) {
__ Mov(x17, src_base);
__ St1(v0.V8B(), MemOperand(x17, 8, PostIndex));
- __ Ldr(d16, MemOperand(x17, x18));
+ __ Ldr(d16, MemOperand(x17, x28));
__ St1(v0.V8B(), v1.V8B(), MemOperand(x17, 16, PostIndex));
__ Ldr(q17, MemOperand(x17, x19));
@@ -5135,7 +5135,7 @@ TEST(neon_st1_d_postindex) {
__ St1(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x17, 24, PostIndex));
__ Ldr(d18, MemOperand(x17, x20));
__ Ldr(d19, MemOperand(x17, x19));
- __ Ldr(d20, MemOperand(x17, x18));
+ __ Ldr(d20, MemOperand(x17, x28));
__ St1(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(),
MemOperand(x17, 32, PostIndex));
@@ -5223,7 +5223,7 @@ TEST(neon_st1_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -16);
+ __ Mov(x28, -16);
__ Mov(x19, -32);
__ Mov(x20, -48);
__ Mov(x21, -64);
@@ -5233,23 +5233,23 @@ TEST(neon_st1_q_postindex) {
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
__ St1(v0.V16B(), MemOperand(x17, 16, PostIndex));
- __ Ldr(q16, MemOperand(x17, x18));
+ __ Ldr(q16, MemOperand(x17, x28));
__ St1(v0.V8H(), v1.V8H(), MemOperand(x17, 32, PostIndex));
__ Ldr(q17, MemOperand(x17, x19));
- __ Ldr(q18, MemOperand(x17, x18));
+ __ Ldr(q18, MemOperand(x17, x28));
__ St1(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x17, 48, PostIndex));
__ Ldr(q19, MemOperand(x17, x20));
__ Ldr(q20, MemOperand(x17, x19));
- __ Ldr(q21, MemOperand(x17, x18));
+ __ Ldr(q21, MemOperand(x17, x28));
__ St1(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(),
MemOperand(x17, 64, PostIndex));
__ Ldr(q22, MemOperand(x17, x21));
__ Ldr(q23, MemOperand(x17, x20));
__ Ldr(q24, MemOperand(x17, x19));
- __ Ldr(q25, MemOperand(x17, x18));
+ __ Ldr(q25, MemOperand(x17, x28));
END();
@@ -5279,15 +5279,15 @@ TEST(neon_st2_d) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V8B(), v1.V8B(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St2(v0.V4H(), v1.V4H(), MemOperand(x18));
- __ Add(x18, x18, 11);
- __ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
+ __ St2(v0.V8B(), v1.V8B(), MemOperand(x19));
+ __ Add(x19, x19, 22);
+ __ St2(v0.V4H(), v1.V4H(), MemOperand(x19));
+ __ Add(x19, x19, 11);
+ __ St2(v0.V2S(), v1.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5318,13 +5318,13 @@ TEST(neon_st2_d_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V8B(), v1.V8B(), MemOperand(x18, x22, PostIndex));
- __ St2(v0.V4H(), v1.V4H(), MemOperand(x18, 16, PostIndex));
- __ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
+ __ St2(v0.V8B(), v1.V8B(), MemOperand(x19, x22, PostIndex));
+ __ St2(v0.V4H(), v1.V4H(), MemOperand(x19, 16, PostIndex));
+ __ St2(v0.V2S(), v1.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5352,17 +5352,17 @@ TEST(neon_st2_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V16B(), v1.V16B(), MemOperand(x18));
- __ Add(x18, x18, 8);
- __ St2(v0.V8H(), v1.V8H(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St2(v0.V4S(), v1.V4S(), MemOperand(x18));
- __ Add(x18, x18, 2);
- __ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
+ __ St2(v0.V16B(), v1.V16B(), MemOperand(x19));
+ __ Add(x19, x19, 8);
+ __ St2(v0.V8H(), v1.V8H(), MemOperand(x19));
+ __ Add(x19, x19, 22);
+ __ St2(v0.V4S(), v1.V4S(), MemOperand(x19));
+ __ Add(x19, x19, 2);
+ __ St2(v0.V2D(), v1.V2D(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5393,14 +5393,14 @@ TEST(neon_st2_q_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V16B(), v1.V16B(), MemOperand(x18, x22, PostIndex));
- __ St2(v0.V8H(), v1.V8H(), MemOperand(x18, 32, PostIndex));
- __ St2(v0.V4S(), v1.V4S(), MemOperand(x18, x22, PostIndex));
- __ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
+ __ St2(v0.V16B(), v1.V16B(), MemOperand(x19, x22, PostIndex));
+ __ St2(v0.V8H(), v1.V8H(), MemOperand(x19, 32, PostIndex));
+ __ St2(v0.V4S(), v1.V4S(), MemOperand(x19, x22, PostIndex));
+ __ St2(v0.V2D(), v1.V2D(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5432,16 +5432,16 @@ TEST(neon_st3_d) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18));
- __ Add(x18, x18, 3);
- __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18));
- __ Add(x18, x18, 2);
- __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
+ __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x19));
+ __ Add(x19, x19, 3);
+ __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x19));
+ __ Add(x19, x19, 2);
+ __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5468,14 +5468,14 @@ TEST(neon_st3_d_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18, x22, PostIndex));
- __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18, 24, PostIndex));
- __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
+ __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x19, x22, PostIndex));
+ __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x19, 24, PostIndex));
+ __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5505,18 +5505,18 @@ TEST(neon_st3_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18));
- __ Add(x18, x18, 5);
- __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18));
- __ Add(x18, x18, 12);
- __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
+ __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x19));
+ __ Add(x19, x19, 5);
+ __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x19));
+ __ Add(x19, x19, 12);
+ __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x19));
+ __ Add(x19, x19, 22);
+ __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5551,15 +5551,15 @@ TEST(neon_st3_q_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18, x22, PostIndex));
- __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18, 48, PostIndex));
- __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18, x22, PostIndex));
- __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
+ __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x28, x22, PostIndex));
+ __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x28, 48, PostIndex));
+ __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x28, x22, PostIndex));
+ __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5595,17 +5595,17 @@ TEST(neon_st4_d) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
- __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x18));
- __ Add(x18, x18, 12);
- __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x18));
- __ Add(x18, x18, 15);
- __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
+ __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x28));
+ __ Add(x28, x28, 12);
+ __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x28));
+ __ Add(x28, x28, 15);
+ __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5636,17 +5636,17 @@ TEST(neon_st4_d_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
__ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
- MemOperand(x18, x22, PostIndex));
+ MemOperand(x28, x22, PostIndex));
__ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(),
- MemOperand(x18, 32, PostIndex));
- __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
+ MemOperand(x28, 32, PostIndex));
+ __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5678,20 +5678,20 @@ TEST(neon_st4_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
- __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x18));
- __ Add(x18, x18, 5);
- __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x18));
- __ Add(x18, x18, 12);
- __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
- __ Add(x18, x18, 10);
+ __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x28));
+ __ Add(x28, x28, 5);
+ __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x28));
+ __ Add(x28, x28, 12);
+ __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x28));
+ __ Add(x28, x28, 22);
+ __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x28));
+ __ Add(x28, x28, 10);
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5728,19 +5728,19 @@ TEST(neon_st4_q_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
__ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(),
- MemOperand(x18, x22, PostIndex));
+ MemOperand(x28, x22, PostIndex));
__ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(),
- MemOperand(x18, 64, PostIndex));
+ MemOperand(x28, 64, PostIndex));
__ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(),
- MemOperand(x18, x22, PostIndex));
- __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
+ MemOperand(x28, x22, PostIndex));
+ __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -6065,13 +6065,13 @@ TEST(ldp_stp_offset) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, src_base + 24);
+ __ Mov(x28, src_base + 24);
__ Mov(x19, dst_base + 56);
__ Ldp(w0, w1, MemOperand(x16));
__ Ldp(w2, w3, MemOperand(x16, 4));
__ Ldp(x4, x5, MemOperand(x16, 8));
- __ Ldp(w6, w7, MemOperand(x18, -12));
- __ Ldp(x8, x9, MemOperand(x18, -16));
+ __ Ldp(w6, w7, MemOperand(x28, -12));
+ __ Ldp(x8, x9, MemOperand(x28, -16));
__ Stp(w0, w1, MemOperand(x17));
__ Stp(w2, w3, MemOperand(x17, 8));
__ Stp(x4, x5, MemOperand(x17, 16));
@@ -6100,7 +6100,7 @@ TEST(ldp_stp_offset) {
CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(src_base + 24, x18);
+ CHECK_EQUAL_64(src_base + 24, x28);
CHECK_EQUAL_64(dst_base + 56, x19);
}
@@ -6120,13 +6120,13 @@ TEST(ldp_stp_offset_wide) {
START();
__ Mov(x20, src_base - base_offset);
__ Mov(x21, dst_base - base_offset);
- __ Mov(x18, src_base + base_offset + 24);
+ __ Mov(x28, src_base + base_offset + 24);
__ Mov(x19, dst_base + base_offset + 56);
__ Ldp(w0, w1, MemOperand(x20, base_offset));
__ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
__ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
- __ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
- __ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
+ __ Ldp(w6, w7, MemOperand(x28, -12 - base_offset));
+ __ Ldp(x8, x9, MemOperand(x28, -16 - base_offset));
__ Stp(w0, w1, MemOperand(x21, base_offset));
__ Stp(w2, w3, MemOperand(x21, base_offset + 8));
__ Stp(x4, x5, MemOperand(x21, base_offset + 16));
@@ -6155,7 +6155,7 @@ TEST(ldp_stp_offset_wide) {
CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
CHECK_EQUAL_64(src_base - base_offset, x20);
CHECK_EQUAL_64(dst_base - base_offset, x21);
- CHECK_EQUAL_64(src_base + base_offset + 24, x18);
+ CHECK_EQUAL_64(src_base + base_offset + 24, x28);
CHECK_EQUAL_64(dst_base + base_offset + 56, x19);
}
@@ -6172,7 +6172,7 @@ TEST(ldp_stp_preindex) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base + 16);
+ __ Mov(x28, dst_base + 16);
__ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
__ Mov(x19, x16);
__ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
@@ -6182,9 +6182,9 @@ TEST(ldp_stp_preindex) {
__ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
__ Mov(x21, x16);
__ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
- __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
- __ Mov(x22, x18);
- __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8, PreIndex));
+ __ Mov(x22, x28);
+ __ Stp(x5, x4, MemOperand(x28, -8, PreIndex));
END();
RUN();
@@ -6204,7 +6204,7 @@ TEST(ldp_stp_preindex) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 4, x19);
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
@@ -6227,7 +6227,7 @@ TEST(ldp_stp_preindex_wide) {
START();
__ Mov(x24, src_base - base_offset);
__ Mov(x25, dst_base + base_offset);
- __ Mov(x18, dst_base + base_offset + 16);
+ __ Mov(x28, dst_base + base_offset + 16);
__ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
__ Mov(x19, x24);
__ Mov(x24, src_base - base_offset + 4);
@@ -6241,10 +6241,10 @@ TEST(ldp_stp_preindex_wide) {
__ Mov(x21, x24);
__ Mov(x24, src_base - base_offset + 8);
__ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
- __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
- __ Mov(x22, x18);
- __ Mov(x18, dst_base + base_offset + 16 + 8);
- __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8 - base_offset, PreIndex));
+ __ Mov(x22, x28);
+ __ Mov(x28, dst_base + base_offset + 16 + 8);
+ __ Stp(x5, x4, MemOperand(x28, -8 - base_offset, PreIndex));
END();
RUN();
@@ -6264,7 +6264,7 @@ TEST(ldp_stp_preindex_wide) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x24);
CHECK_EQUAL_64(dst_base, x25);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 4, x19);
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
@@ -6284,7 +6284,7 @@ TEST(ldp_stp_postindex) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base + 16);
+ __ Mov(x28, dst_base + 16);
__ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
__ Mov(x19, x16);
__ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
@@ -6294,9 +6294,9 @@ TEST(ldp_stp_postindex) {
__ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
__ Mov(x21, x16);
__ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
- __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
- __ Mov(x22, x18);
- __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8, PostIndex));
+ __ Mov(x22, x28);
+ __ Stp(x5, x4, MemOperand(x28, -8, PostIndex));
END();
RUN();
@@ -6316,7 +6316,7 @@ TEST(ldp_stp_postindex) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 4, x19);
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
@@ -6339,7 +6339,7 @@ TEST(ldp_stp_postindex_wide) {
START();
__ Mov(x24, src_base);
__ Mov(x25, dst_base);
- __ Mov(x18, dst_base + 16);
+ __ Mov(x28, dst_base + 16);
__ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
__ Mov(x19, x24);
__ Sub(x24, x24, base_offset);
@@ -6353,10 +6353,10 @@ TEST(ldp_stp_postindex_wide) {
__ Mov(x21, x24);
__ Sub(x24, x24, base_offset);
__ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
- __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
- __ Mov(x22, x18);
- __ Add(x18, x18, base_offset);
- __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8 - base_offset, PostIndex));
+ __ Mov(x22, x28);
+ __ Add(x28, x28, base_offset);
+ __ Stp(x5, x4, MemOperand(x28, -8 - base_offset, PostIndex));
END();
RUN();
@@ -6376,7 +6376,7 @@ TEST(ldp_stp_postindex_wide) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base + base_offset, x24);
CHECK_EQUAL_64(dst_base - base_offset, x25);
- CHECK_EQUAL_64(dst_base - base_offset + 16, x18);
+ CHECK_EQUAL_64(dst_base - base_offset + 16, x28);
CHECK_EQUAL_64(src_base + base_offset + 4, x19);
CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
CHECK_EQUAL_64(src_base + base_offset + 8, x21);
@@ -6412,14 +6412,14 @@ TEST(ldur_stur) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base + 16);
__ Mov(x20, dst_base + 32);
__ Mov(x21, dst_base + 40);
__ Ldr(w0, MemOperand(x17, 1));
- __ Str(w0, MemOperand(x18, 2));
+ __ Str(w0, MemOperand(x28, 2));
__ Ldr(x1, MemOperand(x17, 3));
- __ Str(x1, MemOperand(x18, 9));
+ __ Str(x1, MemOperand(x28, 9));
__ Ldr(w2, MemOperand(x19, -9));
__ Str(w2, MemOperand(x20, -5));
__ Ldrb(w3, MemOperand(x19, -1));
@@ -6438,28 +6438,18 @@ TEST(ldur_stur) {
CHECK_EQUAL_64(0x00000001, x3);
CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base, x18);
+ CHECK_EQUAL_64(dst_base, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
}
-namespace {
-
-void LoadLiteral(MacroAssembler* masm, Register reg, uint64_t imm) {
- // Since we do not allow non-relocatable entries in the literal pool, we need
- // to fake a relocation mode that is not NONE here.
- masm->Ldr(reg, Immediate(imm, RelocInfo::FULL_EMBEDDED_OBJECT));
-}
-
-} // namespace
-
TEST(ldr_pcrel_large_offset) {
INIT_V8();
SETUP_SIZE(1 * MB);
START();
- LoadLiteral(&masm, x1, 0x1234567890ABCDEFUL);
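+  // Ldr of a heap object handle goes through the literal (constant) pool.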
+ __ Ldr(x1, isolate->factory()->undefined_value());
{
v8::internal::PatchingAssembler::BlockPoolsScope scope(&masm);
@@ -6469,14 +6459,14 @@ TEST(ldr_pcrel_large_offset) {
}
}
- LoadLiteral(&masm, x2, 0x1234567890ABCDEFUL);
+ __ Ldr(x2, isolate->factory()->undefined_value());
END();
RUN();
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x1);
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x1);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x2);
}
TEST(ldr_literal) {
@@ -6484,132 +6474,159 @@ TEST(ldr_literal) {
SETUP();
START();
- LoadLiteral(&masm, x2, 0x1234567890ABCDEFUL);
+ __ Ldr(x2, isolate->factory()->undefined_value());
END();
RUN();
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x2);
}
#ifdef DEBUG
// These tests rely on functions available in debug mode.
enum LiteralPoolEmitOutcome { EmitExpected, NoEmitExpected };
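+// EmitAtAligned arranges for the pool data to land 8-byte aligned (needing no
+// padding); EmitAtUnaligned forces a 4-byte pad. See the prepadding Nop below.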
+enum LiteralPoolEmissionAlignment { EmitAtUnaligned, EmitAtAligned };
-static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
- size_t prepadding = 0) {
+static void LdrLiteralRangeHelper(
+ size_t range, LiteralPoolEmitOutcome outcome,
+ LiteralPoolEmissionAlignment unaligned_emission) {
SETUP_SIZE(static_cast<int>(range + 1024));
- size_t code_size = 0;
- const size_t pool_entries = 2;
- const size_t kEntrySize = 8;
+ const size_t first_pool_entries = 2;
+ const size_t first_pool_size_bytes = first_pool_entries * kInt64Size;
START();
// Force a pool dump so the pool starts off empty.
- __ CheckConstPool(true, true);
+ __ ForceConstantPoolEmissionWithJump();
CHECK_CONSTANT_POOL_SIZE(0);
- // Emit prepadding to influence alignment of the pool; we don't count this
- // into code size.
- for (size_t i = 0; i < prepadding; ++i) __ Nop();
+ // Emit prepadding to influence alignment of the pool.
+ bool currently_aligned = IsAligned(__ pc_offset(), kInt64Size);
+ if ((unaligned_emission == EmitAtUnaligned && currently_aligned) ||
+ (unaligned_emission == EmitAtAligned && !currently_aligned)) {
+ __ Nop();
+ }
- LoadLiteral(&masm, x0, 0x1234567890ABCDEFUL);
- LoadLiteral(&masm, x1, 0xABCDEF1234567890UL);
- code_size += 2 * kInstrSize;
- CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ int initial_pc_offset = __ pc_offset();
+ __ Ldr(x0, isolate->factory()->undefined_value());
+ __ Ldr(x1, isolate->factory()->the_hole_value());
+ CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
- // Check that the requested range (allowing space for a branch over the pool)
- // can be handled by this test.
- CHECK_LE(code_size, range);
+ size_t expected_pool_size = 0;
#if defined(_M_ARM64) && !defined(__clang__)
-  auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
+  auto PoolSizeAt = [unaligned_emission, first_pool_size_bytes](int pc_offset) {
#else
- auto PoolSizeAt = [](int pc_offset) {
+ auto PoolSizeAt = [unaligned_emission](int pc_offset) {
#endif
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
size_t pc = pc_offset + prologue_size;
- const size_t padding = IsAligned(pc, 8) ? 0 : 4;
- return prologue_size + pool_entries * kEntrySize + padding;
+ const size_t padding = IsAligned(pc, kInt64Size) ? 0 : kInt32Size;
+ CHECK_EQ(padding == 0, unaligned_emission == EmitAtAligned);
+ return prologue_size + first_pool_size_bytes + padding;
};
int pc_offset_before_emission = -1;
- // Emit NOPs up to 'range'.
- while (code_size < range) {
+ bool pool_was_emitted = false;
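+  // Emit Nops until 'range' bytes have been generated since the first literal
+  // load, stopping early if the pool gets dumped (its tracked size drops to
+  // zero).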
+ while (__ pc_offset() - initial_pc_offset < static_cast<intptr_t>(range)) {
pc_offset_before_emission = __ pc_offset() + kInstrSize;
__ Nop();
- code_size += kInstrSize;
+ if (__ GetConstantPoolEntriesSizeForTesting() == 0) {
+ pool_was_emitted = true;
+ break;
+ }
}
- CHECK_EQ(code_size, range);
if (outcome == EmitExpected) {
- CHECK_CONSTANT_POOL_SIZE(0);
+ if (!pool_was_emitted) {
+ FATAL(
+ "Pool was not emitted up to pc_offset %d which corresponds to a "
+ "distance to the first constant of %d bytes",
+ __ pc_offset(), __ pc_offset() - initial_pc_offset);
+ }
// Check that the size of the emitted constant pool is as expected.
- size_t pool_size = PoolSizeAt(pc_offset_before_emission);
- CHECK_EQ(pc_offset_before_emission + pool_size, __ pc_offset());
- byte* pool_start = buf + pc_offset_before_emission;
- Instruction* branch = reinterpret_cast<Instruction*>(pool_start);
- CHECK(branch->IsImmBranch());
- CHECK_EQ(pool_size, branch->ImmPCOffset());
- Instruction* marker =
- reinterpret_cast<Instruction*>(pool_start + kInstrSize);
- CHECK(marker->IsLdrLiteralX());
- const size_t padding =
- IsAligned(pc_offset_before_emission + kInstrSize, kEntrySize) ? 0 : 1;
- CHECK_EQ(pool_entries * 2 + 1 + padding, marker->ImmLLiteral());
-
+ expected_pool_size = PoolSizeAt(pc_offset_before_emission);
+ CHECK_EQ(pc_offset_before_emission + expected_pool_size, __ pc_offset());
} else {
CHECK_EQ(outcome, NoEmitExpected);
- CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ if (pool_was_emitted) {
+ FATAL("Pool was unexpectedly emitted at pc_offset %d ",
+ pc_offset_before_emission);
+ }
+ CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
CHECK_EQ(pc_offset_before_emission, __ pc_offset());
}
// Force a pool flush to check that a second pool functions correctly.
- __ CheckConstPool(true, true);
+ __ ForceConstantPoolEmissionWithJump();
CHECK_CONSTANT_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
- LoadLiteral(&masm, x4, 0x34567890ABCDEF12UL);
- LoadLiteral(&masm, x5, 0xABCDEF0123456789UL);
- CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ const int second_pool_entries = 2;
+ __ Ldr(x4, isolate->factory()->true_value());
+ __ Ldr(x5, isolate->factory()->false_value());
+ CHECK_CONSTANT_POOL_SIZE(second_pool_entries * kInt64Size);
+
END();
+ if (outcome == EmitExpected) {
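+    // Layout of the emitted pool: a branch over the pool, an LDR-literal
+    // marker whose immediate gives the size of the marker, padding and data
+    // in 32-bit words, then the pool data itself.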
+ Address pool_start = code->InstructionStart() + pc_offset_before_emission;
+ Instruction* branch = reinterpret_cast<Instruction*>(pool_start);
+ CHECK(branch->IsImmBranch());
+ CHECK_EQ(expected_pool_size, branch->ImmPCOffset());
+ Instruction* marker =
+ reinterpret_cast<Instruction*>(pool_start + kInstrSize);
+ CHECK(marker->IsLdrLiteralX());
+ size_t pool_data_start_offset = pc_offset_before_emission + kInstrSize;
+ size_t padding =
+ IsAligned(pool_data_start_offset, kInt64Size) ? 0 : kInt32Size;
+ size_t marker_size = kInstrSize;
+ CHECK_EQ((first_pool_size_bytes + marker_size + padding) / kInt32Size,
+ marker->ImmLLiteral());
+ }
+
RUN();
// Check that the literals loaded correctly.
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x0);
- CHECK_EQUAL_64(0xABCDEF1234567890UL, x1);
- CHECK_EQUAL_64(0x34567890ABCDEF12UL, x4);
- CHECK_EQUAL_64(0xABCDEF0123456789UL, x5);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x0);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->the_hole_value(), x1);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->true_value(), x4);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->false_value(), x5);
}
TEST(ldr_literal_range_max_dist_emission_1) {
INIT_V8();
- LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
- EmitExpected);
+ LdrLiteralRangeHelper(
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ EmitExpected, EmitAtAligned);
}
TEST(ldr_literal_range_max_dist_emission_2) {
INIT_V8();
- LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
- EmitExpected, 1);
+ LdrLiteralRangeHelper(
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ EmitExpected, EmitAtUnaligned);
}
TEST(ldr_literal_range_max_dist_no_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(
- MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
- NoEmitExpected);
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ NoEmitExpected, EmitAtUnaligned);
}
TEST(ldr_literal_range_max_dist_no_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(
- MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
- NoEmitExpected, 1);
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ NoEmitExpected, EmitAtAligned);
}
#endif
@@ -6682,7 +6699,7 @@ TEST(add_sub_wide_imm) {
__ Add(w12, w0, Operand(0x12345678));
__ Add(w13, w1, Operand(0xFFFFFFFF));
- __ Add(w18, w0, Operand(kWMinInt));
+ __ Add(w28, w0, Operand(kWMinInt));
__ Sub(w19, w0, Operand(kWMinInt));
__ Sub(x20, x0, Operand(0x1234567890ABCDEFUL));
@@ -6697,7 +6714,7 @@ TEST(add_sub_wide_imm) {
CHECK_EQUAL_32(0x12345678, w12);
CHECK_EQUAL_64(0x0, x13);
- CHECK_EQUAL_32(kWMinInt, w18);
+ CHECK_EQUAL_32(kWMinInt, w28);
CHECK_EQUAL_32(kWMinInt, w19);
CHECK_EQUAL_64(-0x1234567890ABCDEFLL, x20);
@@ -6720,7 +6737,7 @@ TEST(add_sub_shifted) {
__ Add(x13, x0, Operand(x1, ASR, 8));
__ Add(x14, x0, Operand(x2, ASR, 8));
__ Add(w15, w0, Operand(w1, ASR, 8));
- __ Add(w18, w3, Operand(w1, ROR, 8));
+ __ Add(w28, w3, Operand(w1, ROR, 8));
__ Add(x19, x3, Operand(x1, ROR, 8));
__ Sub(x20, x3, Operand(x2));
@@ -6741,7 +6758,7 @@ TEST(add_sub_shifted) {
CHECK_EQUAL_64(0x000123456789ABCDL, x13);
CHECK_EQUAL_64(0xFFFEDCBA98765432L, x14);
CHECK_EQUAL_64(0xFF89ABCD, x15);
- CHECK_EQUAL_64(0xEF89ABCC, x18);
+ CHECK_EQUAL_64(0xEF89ABCC, x28);
CHECK_EQUAL_64(0xEF0123456789ABCCL, x19);
CHECK_EQUAL_64(0x0123456789ABCDEFL, x20);
@@ -6773,7 +6790,7 @@ TEST(add_sub_extended) {
__ Add(x15, x0, Operand(x1, SXTB, 1));
__ Add(x16, x0, Operand(x1, SXTH, 2));
__ Add(x17, x0, Operand(x1, SXTW, 3));
- __ Add(x18, x0, Operand(x2, SXTB, 0));
+ __ Add(x4, x0, Operand(x2, SXTB, 0));
__ Add(x19, x0, Operand(x2, SXTB, 1));
__ Add(x20, x0, Operand(x2, SXTH, 2));
__ Add(x21, x0, Operand(x2, SXTW, 3));
@@ -6803,7 +6820,7 @@ TEST(add_sub_extended) {
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFDEL, x15);
CHECK_EQUAL_64(0xFFFFFFFFFFFF37BCL, x16);
CHECK_EQUAL_64(0xFFFFFFFC4D5E6F78L, x17);
- CHECK_EQUAL_64(0x10L, x18);
+ CHECK_EQUAL_64(0x10L, x4);
CHECK_EQUAL_64(0x20L, x19);
CHECK_EQUAL_64(0xC840L, x20);
CHECK_EQUAL_64(0x3B2A19080L, x21);
@@ -7430,7 +7447,7 @@ TEST(adc_sbc_shift) {
// Set the C flag.
__ Cmp(w0, Operand(w0));
- __ Adc(x18, x2, Operand(x3));
+ __ Adc(x28, x2, Operand(x3));
__ Adc(x19, x0, Operand(x1, LSL, 60));
__ Sbc(x20, x4, Operand(x3, LSR, 4));
__ Adc(x21, x2, Operand(x3, ASR, 4));
@@ -7457,7 +7474,7 @@ TEST(adc_sbc_shift) {
CHECK_EQUAL_32(0x91111110, w13);
CHECK_EQUAL_32(0x9A222221, w14);
- CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x18);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x28);
CHECK_EQUAL_64((1LL << 60) + 1, x19);
CHECK_EQUAL_64(0xF0123456789ABCDDL + 1, x20);
CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
@@ -7581,7 +7598,7 @@ TEST(adc_sbc_wide_imm) {
// Set the C flag.
__ Cmp(w0, Operand(w0));
- __ Adc(x18, x0, Operand(0x1234567890ABCDEFUL));
+ __ Adc(x28, x0, Operand(0x1234567890ABCDEFUL));
__ Adc(w19, w0, Operand(0xFFFFFFFF));
__ Sbc(x20, x0, Operand(0x1234567890ABCDEFUL));
__ Sbc(w21, w0, Operand(0xFFFFFFFF));
@@ -7598,7 +7615,7 @@ TEST(adc_sbc_wide_imm) {
CHECK_EQUAL_64(0xFFFFFFFF, x11);
CHECK_EQUAL_64(0xFFFF, x12);
- CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x18);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x28);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xEDCBA9876F543211UL, x20);
CHECK_EQUAL_64(1, x21);
@@ -7758,7 +7775,7 @@ TEST(cmp_shift) {
SETUP();
START();
- __ Mov(x18, 0xF0000000);
+ __ Mov(x28, 0xF0000000);
__ Mov(x19, 0xF000000010000000UL);
__ Mov(x20, 0xF0000000F0000000UL);
__ Mov(x21, 0x7800000078000000UL);
@@ -7778,7 +7795,7 @@ TEST(cmp_shift) {
__ Cmp(w19, Operand(w23, LSR, 3));
__ Mrs(x2, NZCV);
- __ Cmp(x18, Operand(x24, LSR, 4));
+ __ Cmp(x28, Operand(x24, LSR, 4));
__ Mrs(x3, NZCV);
__ Cmp(w20, Operand(w25, ASR, 2));
@@ -7993,7 +8010,7 @@ TEST(csel) {
__ Cneg(x12, x24, ne);
__ csel(w15, w24, w25, al);
- __ csel(x18, x24, x25, nv);
+ __ csel(x28, x24, x25, nv);
__ CzeroX(x24, ne);
__ CzeroX(x25, eq);
@@ -8020,7 +8037,7 @@ TEST(csel) {
CHECK_EQUAL_64(0x0000000F, x13);
CHECK_EQUAL_64(0x0000000F0000000FUL, x14);
CHECK_EQUAL_64(0x0000000F, x15);
- CHECK_EQUAL_64(0x0000000F0000000FUL, x18);
+ CHECK_EQUAL_64(0x0000000F0000000FUL, x28);
CHECK_EQUAL_64(0, x24);
CHECK_EQUAL_64(0x0000001F0000001FUL, x25);
CHECK_EQUAL_64(0x0000001F0000001FUL, x26);
@@ -8032,11 +8049,11 @@ TEST(csel_imm) {
SETUP();
START();
- __ Mov(x18, 0);
+ __ Mov(x28, 0);
__ Mov(x19, 0x80000000);
__ Mov(x20, 0x8000000000000000UL);
- __ Cmp(x18, Operand(0));
+ __ Cmp(x28, Operand(0));
__ Csel(w0, w19, -2, ne);
__ Csel(w1, w19, -1, ne);
__ Csel(w2, w19, 0, ne);
@@ -8098,7 +8115,7 @@ TEST(lslv) {
__ Lsl(x16, x0, x1);
__ Lsl(x17, x0, x2);
- __ Lsl(x18, x0, x3);
+ __ Lsl(x28, x0, x3);
__ Lsl(x19, x0, x4);
__ Lsl(x20, x0, x5);
__ Lsl(x21, x0, x6);
@@ -8116,7 +8133,7 @@ TEST(lslv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(value << (shift[0] & 63), x16);
CHECK_EQUAL_64(value << (shift[1] & 63), x17);
- CHECK_EQUAL_64(value << (shift[2] & 63), x18);
+ CHECK_EQUAL_64(value << (shift[2] & 63), x28);
CHECK_EQUAL_64(value << (shift[3] & 63), x19);
CHECK_EQUAL_64(value << (shift[4] & 63), x20);
CHECK_EQUAL_64(value << (shift[5] & 63), x21);
@@ -8148,7 +8165,7 @@ TEST(lsrv) {
__ Lsr(x16, x0, x1);
__ Lsr(x17, x0, x2);
- __ Lsr(x18, x0, x3);
+ __ Lsr(x28, x0, x3);
__ Lsr(x19, x0, x4);
__ Lsr(x20, x0, x5);
__ Lsr(x21, x0, x6);
@@ -8166,7 +8183,7 @@ TEST(lsrv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
- CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+ CHECK_EQUAL_64(value >> (shift[2] & 63), x28);
CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
@@ -8200,7 +8217,7 @@ TEST(asrv) {
__ Asr(x16, x0, x1);
__ Asr(x17, x0, x2);
- __ Asr(x18, x0, x3);
+ __ Asr(x28, x0, x3);
__ Asr(x19, x0, x4);
__ Asr(x20, x0, x5);
__ Asr(x21, x0, x6);
@@ -8218,7 +8235,7 @@ TEST(asrv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
- CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+ CHECK_EQUAL_64(value >> (shift[2] & 63), x28);
CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
@@ -8252,7 +8269,7 @@ TEST(rorv) {
__ Ror(x16, x0, x1);
__ Ror(x17, x0, x2);
- __ Ror(x18, x0, x3);
+ __ Ror(x28, x0, x3);
__ Ror(x19, x0, x4);
__ Ror(x20, x0, x5);
__ Ror(x21, x0, x6);
@@ -8270,7 +8287,7 @@ TEST(rorv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(0xF0123456789ABCDEUL, x16);
CHECK_EQUAL_64(0xEF0123456789ABCDUL, x17);
- CHECK_EQUAL_64(0xDEF0123456789ABCUL, x18);
+ CHECK_EQUAL_64(0xDEF0123456789ABCUL, x28);
CHECK_EQUAL_64(0xCDEF0123456789ABUL, x19);
CHECK_EQUAL_64(0xABCDEF0123456789UL, x20);
CHECK_EQUAL_64(0x789ABCDEF0123456UL, x21);
@@ -8338,7 +8355,7 @@ TEST(sbfm) {
__ sbfm(w17, w2, 24, 15);
// Aliases.
- __ Asr(x18, x1, 32);
+ __ Asr(x3, x1, 32);
__ Asr(x19, x2, 32);
__ Sbfiz(x20, x1, 8, 16);
__ Sbfiz(x21, x2, 8, 16);
@@ -8364,7 +8381,7 @@ TEST(sbfm) {
CHECK_EQUAL_32(0x54, w16);
CHECK_EQUAL_32(0x00321000, w17);
- CHECK_EQUAL_64(0x01234567L, x18);
+ CHECK_EQUAL_64(0x01234567L, x3);
CHECK_EQUAL_64(0xFFFFFFFFFEDCBA98L, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFCDEF00L, x20);
CHECK_EQUAL_64(0x321000L, x21);
@@ -8403,7 +8420,7 @@ TEST(ubfm) {
__ Lsl(x15, x1, 63);
__ Lsl(x16, x1, 0);
__ Lsr(x17, x1, 32);
- __ Ubfiz(x18, x1, 8, 16);
+ __ Ubfiz(x3, x1, 8, 16);
__ Ubfx(x19, x1, 8, 16);
__ Uxtb(x20, x1);
__ Uxth(x21, x1);
@@ -8425,7 +8442,7 @@ TEST(ubfm) {
CHECK_EQUAL_64(0x8000000000000000L, x15);
CHECK_EQUAL_64(0x0123456789ABCDEFL, x16);
CHECK_EQUAL_64(0x01234567L, x17);
- CHECK_EQUAL_64(0xCDEF00L, x18);
+ CHECK_EQUAL_64(0xCDEF00L, x3);
CHECK_EQUAL_64(0xABCDL, x19);
CHECK_EQUAL_64(0xEFL, x20);
CHECK_EQUAL_64(0xCDEFL, x21);
@@ -9359,8 +9376,8 @@ TEST(fcmp) {
__ Fmov(s8, 0.0);
__ Fmov(s9, 0.5);
- __ Mov(w18, 0x7F800001); // Single precision NaN.
- __ Fmov(s18, w18);
+ __ Mov(w19, 0x7F800001); // Single precision NaN.
+ __ Fmov(s18, w19);
__ Fcmp(s8, s8);
__ Mrs(x0, NZCV);
@@ -10190,6 +10207,9 @@ TEST(fcvtas) {
INIT_V8();
SETUP();
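+  // With x18 unavailable there are not enough result registers, so the s16
+  // result is stored to this scratch slot via x29 and checked from memory.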
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10207,8 +10227,8 @@ TEST(fcvtas) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 2.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 2.5);
__ Fmov(s19, -2.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10239,7 +10259,6 @@ TEST(fcvtas) {
__ Fcvtas(w14, d14);
__ Fcvtas(w15, d15);
__ Fcvtas(x17, s17);
- __ Fcvtas(x18, s18);
__ Fcvtas(x19, s19);
__ Fcvtas(x20, s20);
__ Fcvtas(x21, s21);
@@ -10250,6 +10269,12 @@ TEST(fcvtas) {
__ Fcvtas(x26, d26);
__ Fcvtas(x27, d27);
__ Fcvtas(x28, d28);
+
+  // Save this result to scratch memory; it no longer fits in a register.
+ __ Mov(x30, scratch_base);
+ __ Fcvtas(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtas(x29, d29);
__ Fcvtas(x30, d30);
END();
@@ -10272,8 +10297,8 @@ TEST(fcvtas) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(3, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(3, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFDUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10339,7 +10364,7 @@ TEST(fcvtau) {
__ Fcvtau(w15, d15);
__ Fcvtau(x16, s16);
__ Fcvtau(x17, s17);
- __ Fcvtau(x18, s18);
+ __ Fcvtau(x7, s18);
__ Fcvtau(x19, s19);
__ Fcvtau(x20, s20);
__ Fcvtau(x21, s21);
@@ -10371,7 +10396,7 @@ TEST(fcvtau) {
CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(3, x18);
+ CHECK_EQUAL_64(3, x7);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
@@ -10389,6 +10414,9 @@ TEST(fcvtms) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10406,8 +10434,8 @@ TEST(fcvtms) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10438,7 +10466,6 @@ TEST(fcvtms) {
__ Fcvtms(w14, d14);
__ Fcvtms(w15, d15);
__ Fcvtms(x17, s17);
- __ Fcvtms(x18, s18);
__ Fcvtms(x19, s19);
__ Fcvtms(x20, s20);
__ Fcvtms(x21, s21);
@@ -10449,6 +10476,12 @@ TEST(fcvtms) {
__ Fcvtms(x26, d26);
__ Fcvtms(x27, d27);
__ Fcvtms(x28, d28);
+
+  // Save this result to scratch memory; it no longer fits in a register.
+ __ Mov(x30, scratch_base);
+ __ Fcvtms(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtms(x29, d29);
__ Fcvtms(x30, d30);
END();
@@ -10471,8 +10504,8 @@ TEST(fcvtms) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10491,6 +10524,9 @@ TEST(fcvtmu) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10508,8 +10544,8 @@ TEST(fcvtmu) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10538,8 +10574,8 @@ TEST(fcvtmu) {
__ Fcvtmu(w12, d12);
__ Fcvtmu(w13, d13);
__ Fcvtmu(w14, d14);
+ __ Fcvtmu(w15, d15);
__ Fcvtmu(x17, s17);
- __ Fcvtmu(x18, s18);
__ Fcvtmu(x19, s19);
__ Fcvtmu(x20, s20);
__ Fcvtmu(x21, s21);
@@ -10550,6 +10586,12 @@ TEST(fcvtmu) {
__ Fcvtmu(x26, d26);
__ Fcvtmu(x27, d27);
__ Fcvtmu(x28, d28);
+
+  // Save this result to scratch memory; it no longer fits in a register.
+ __ Mov(x30, scratch_base);
+ __ Fcvtmu(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtmu(x29, d29);
__ Fcvtmu(x30, d30);
END();
@@ -10571,8 +10613,9 @@ TEST(fcvtmu) {
CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
+ CHECK_EQUAL_64(0x0, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
@@ -10591,6 +10634,9 @@ TEST(fcvtns) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10608,8 +10654,8 @@ TEST(fcvtns) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10640,7 +10686,6 @@ TEST(fcvtns) {
__ Fcvtns(w14, d14);
__ Fcvtns(w15, d15);
__ Fcvtns(x17, s17);
- __ Fcvtns(x18, s18);
__ Fcvtns(x19, s19);
__ Fcvtns(x20, s20);
__ Fcvtns(x21, s21);
@@ -10651,6 +10696,12 @@ TEST(fcvtns) {
__ Fcvtns(x26, d26);
__ Fcvtns(x27, d27);
// __ Fcvtns(x28, d28);
+
+  // Save this result to scratch memory; it no longer fits in a register.
+ __ Mov(x30, scratch_base);
+ __ Fcvtns(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtns(x29, d29);
__ Fcvtns(x30, d30);
END();
@@ -10673,8 +10724,8 @@ TEST(fcvtns) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(2, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10701,6 +10752,7 @@ TEST(fcvtnu) {
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
__ Fmov(s6, 0xFFFFFF00); // Largest float < UINT32_MAX.
+ __ Fmov(s7, 1.5);
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -10710,7 +10762,6 @@ TEST(fcvtnu) {
__ Fmov(d14, 0xFFFFFFFE);
__ Fmov(s16, 1.0);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10730,6 +10781,7 @@ TEST(fcvtnu) {
__ Fcvtnu(w4, s4);
__ Fcvtnu(w5, s5);
__ Fcvtnu(w6, s6);
+ __ Fcvtnu(x7, s7);
__ Fcvtnu(w8, d8);
__ Fcvtnu(w9, d9);
__ Fcvtnu(w10, d10);
@@ -10740,7 +10792,6 @@ TEST(fcvtnu) {
__ Fcvtnu(w15, d15);
__ Fcvtnu(x16, s16);
__ Fcvtnu(x17, s17);
- __ Fcvtnu(x18, s18);
__ Fcvtnu(x19, s19);
__ Fcvtnu(x20, s20);
__ Fcvtnu(x21, s21);
@@ -10763,6 +10814,7 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
CHECK_EQUAL_64(0xFFFFFF00, x6);
+ CHECK_EQUAL_64(2, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(2, x10);
@@ -10772,7 +10824,6 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
@@ -10790,6 +10841,9 @@ TEST(fcvtzs) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10807,8 +10861,8 @@ TEST(fcvtzs) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10839,7 +10893,6 @@ TEST(fcvtzs) {
__ Fcvtzs(w14, d14);
__ Fcvtzs(w15, d15);
__ Fcvtzs(x17, s17);
- __ Fcvtzs(x18, s18);
__ Fcvtzs(x19, s19);
__ Fcvtzs(x20, s20);
__ Fcvtzs(x21, s21);
@@ -10850,6 +10903,12 @@ TEST(fcvtzs) {
__ Fcvtzs(x26, d26);
__ Fcvtzs(x27, d27);
__ Fcvtzs(x28, d28);
+
+  // Save this result to scratch memory; it no longer fits in a register.
+  __ Mov(x30, scratch_base);
+  __ Fcvtzs(x29, s16);
+  __ Str(x29, MemOperand(x30));
+
__ Fcvtzs(x29, d29);
__ Fcvtzs(x30, d30);
END();
@@ -10872,8 +10931,8 @@ TEST(fcvtzs) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10892,6 +10951,9 @@ TEST(fcvtzu) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10909,8 +10971,8 @@ TEST(fcvtzu) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10939,8 +11001,8 @@ TEST(fcvtzu) {
__ Fcvtzu(w12, d12);
__ Fcvtzu(w13, d13);
__ Fcvtzu(w14, d14);
+ __ Fcvtzu(w15, d15);
__ Fcvtzu(x17, s17);
- __ Fcvtzu(x18, s18);
__ Fcvtzu(x19, s19);
__ Fcvtzu(x20, s20);
__ Fcvtzu(x21, s21);
@@ -10951,6 +11013,12 @@ TEST(fcvtzu) {
__ Fcvtzu(x26, d26);
__ Fcvtzu(x27, d27);
__ Fcvtzu(x28, d28);
+
+  // Save this result to scratch memory; it no longer fits in a register.
+ __ Mov(x30, scratch_base);
+ __ Fcvtzu(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtzu(x29, d29);
__ Fcvtzu(x30, d30);
END();
@@ -10972,8 +11040,9 @@ TEST(fcvtzu) {
CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
+ CHECK_EQUAL_64(0x0, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
@@ -11439,6 +11508,8 @@ TEST(zero_dest) {
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < x30.code(); i++) {
+ // Skip x18, the platform register.
+ if (i == 18) continue;
__ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
}
before.Dump(&masm);
@@ -11503,6 +11574,8 @@ TEST(zero_dest_setflags) {
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < 30; i++) {
+ // Skip x18, the platform register.
+ if (i == 18) continue;
__ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
}
before.Dump(&masm);
@@ -11858,10 +11931,14 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
// Registers in the TmpList can be used by the macro assembler for debug code
// (for example in 'Pop'), so we can't use them here.
- static RegList const allowed = ~(masm.TmpList()->list());
+ // x18 is reserved for the platform register.
+  // Disallow x31 / xzr so that the list has an even number of elements, which
+  // keeps the stack pointer aligned when they are all pushed.
+ RegList allowed = ~(masm.TmpList()->list() | x18.bit() | x31.bit());
if (reg_count == kPushPopMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
+ DCHECK_EQ(reg_count % 2, 0);
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, kNumberOfRegisters>();
auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
@@ -12245,7 +12322,7 @@ TEST(push_pop) {
__ Claim(2);
__ Push(w2, w2, w1, w1);
__ Push(x3, x3);
- __ Pop(w18, w19, w20, w21);
+ __ Pop(w30, w19, w20, w21);
__ Pop(x22, x23);
__ Claim(2);
@@ -12259,8 +12336,10 @@ TEST(push_pop) {
__ Claim(2);
__ PushXRegList(0);
__ PopXRegList(0);
- __ PushXRegList(0xFFFFFFFF);
- __ PopXRegList(0xFFFFFFFF);
+  // Don't push/pop x18 (the platform register) or xzr (for alignment).
+ RegList all_regs = 0xFFFFFFFF & ~(x18.bit() | x31.bit());
+ __ PushXRegList(all_regs);
+ __ PopXRegList(all_regs);
__ Drop(12);
END();
@@ -12287,7 +12366,7 @@ TEST(push_pop) {
CHECK_EQUAL_32(0x33333333U, w15);
CHECK_EQUAL_32(0x22222222U, w14);
- CHECK_EQUAL_32(0x11111111U, w18);
+ CHECK_EQUAL_32(0x11111111U, w30);
CHECK_EQUAL_32(0x11111111U, w19);
CHECK_EQUAL_32(0x11111111U, w20);
CHECK_EQUAL_32(0x11111111U, w21);
@@ -14242,12 +14321,11 @@ TEST(default_nan_double) {
DefaultNaNHelper(qn, qm, qa);
}
-TEST(call_no_relocation) {
+TEST(near_call_no_relocation) {
INIT_V8();
SETUP();
START();
- Address buf_addr = reinterpret_cast<Address>(buf);
Label function;
Label test;
@@ -14263,7 +14341,8 @@ TEST(call_no_relocation) {
__ Push(lr, xzr);
{
Assembler::BlockConstPoolScope scope(&masm);
- __ Call(buf_addr + function.pos(), RelocInfo::NONE);
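+    // near_call takes its target as an instruction offset from the current
+    // pc, hence the division by kInstrSize.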
+ int offset = (function.pos() - __ pc_offset()) / kInstrSize;
+ __ near_call(offset, RelocInfo::NONE);
}
__ Pop(xzr, lr);
END();
@@ -14273,7 +14352,6 @@ TEST(call_no_relocation) {
CHECK_EQUAL_64(1, x0);
}
-
static void AbsHelperX(int64_t value) {
int64_t expected;
@@ -14421,12 +14499,11 @@ TEST(pool_size) {
__ bind(&exit);
- HandleScope handle_scope(isolate);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB)
- .set_self_reference(masm.CodeObject())
- .Build();
+ code = Factory::CodeBuilder(isolate, desc, Code::STUB)
+ .set_self_reference(masm.CodeObject())
+ .Build();
unsigned pool_count = 0;
int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
@@ -14625,6 +14702,7 @@ TEST(internal_reference_linked) {
#undef CHECK_EQUAL_32
#undef CHECK_EQUAL_FP32
#undef CHECK_EQUAL_64
+#undef CHECK_FULL_HEAP_OBJECT_IN_REGISTER
#undef CHECK_EQUAL_FP64
#undef CHECK_EQUAL_128
#undef CHECK_CONSTANT_POOL_SIZE