summary refs log tree commit diff
path: root/deps/v8/src/arm64/macro-assembler-arm64-inl.h
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/arm64/macro-assembler-arm64-inl.h')
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64-inl.h 137
1 files changed, 22 insertions, 115 deletions
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 0861551d89..f96d4b20b8 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -404,8 +404,7 @@ void MacroAssembler::CzeroX(const Register& rd,
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
-void MacroAssembler::CmovX(const Register& rd,
- const Register& rn,
+void TurboAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
@@ -416,6 +415,11 @@ void MacroAssembler::CmovX(const Register& rd,
}
}
+void TurboAssembler::Csdb() {
+ DCHECK(allow_macro_instructions());
+ csdb();
+}
+
void TurboAssembler::Cset(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -423,8 +427,7 @@ void TurboAssembler::Cset(const Register& rd, Condition cond) {
cset(rd, cond);
}
-
-void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+void TurboAssembler::Csetm(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -461,14 +464,12 @@ void MacroAssembler::Csneg(const Register& rd,
csneg(rd, rn, rm, cond);
}
-
-void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dmb(domain, type);
}
-
-void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dsb(domain, type);
}
@@ -651,10 +652,12 @@ void TurboAssembler::Fmov(VRegister vd, double imm) {
if (bits == 0) {
fmov(vd, xzr);
} else {
- Ldr(vd, imm);
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, bits);
+ fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -678,12 +681,10 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
- // TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
Mov(tmp, bit_cast<uint32_t>(imm));
Fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -737,8 +738,7 @@ void MacroAssembler::Hlt(int code) {
hlt(code);
}
-
-void MacroAssembler::Isb() {
+void TurboAssembler::Isb() {
DCHECK(allow_macro_instructions());
isb();
}
@@ -748,12 +748,6 @@ void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
ldr(rt, operand);
}
-void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
- DCHECK(allow_macro_instructions());
- DCHECK(rt.Is64Bits());
- ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
-}
-
void TurboAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
@@ -1042,58 +1036,6 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
-void MacroAssembler::AlignAndSetCSPForFrame() {
- int sp_alignment = ActivationFrameAlignment();
- // AAPCS64 mandates at least 16-byte alignment.
- DCHECK_GE(sp_alignment, 16);
- DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
- Bic(csp, StackPointer(), sp_alignment - 1);
-}
-
-void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
- DCHECK(!csp.Is(StackPointer()));
- if (!TmpList()->IsEmpty()) {
- Sub(csp, StackPointer(), space);
- } else {
- // TODO(jbramley): Several callers rely on this not using scratch
- // registers, so we use the assembler directly here. However, this means
- // that large immediate values of 'space' cannot be handled cleanly. (Only
- // 24-bits immediates or values of 'space' that can be encoded in one
- // instruction are accepted.) Once we implement our flexible scratch
- // register idea, we could greatly simplify this function.
- InstructionAccurateScope scope(this);
- DCHECK(space.IsImmediate());
- // Align to 16 bytes.
- uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
- DCHECK(is_uint24(imm));
-
- Register source = StackPointer();
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- bic(csp, source, 0xf);
- source = csp;
- }
- if (!is_uint12(imm)) {
- int64_t imm_top_12_bits = imm >> 12;
- sub(csp, source, imm_top_12_bits << 12);
- source = csp;
- imm -= imm_top_12_bits << 12;
- }
- if (imm > 0) {
- sub(csp, source, imm);
- }
- }
- AssertStackConsistency();
-}
-
-void TurboAssembler::SyncSystemStackPointer() {
- DCHECK(emit_debug_code());
- DCHECK(!csp.Is(StackPointer()));
- { InstructionAccurateScope scope(this);
- mov(csp, StackPointer());
- }
- AssertStackConsistency();
-}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@@ -1249,14 +1191,9 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
if (size == 0) {
return;
}
+ DCHECK_EQ(size % 16, 0);
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
@@ -1269,13 +1206,9 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (size.IsZero()) {
return;
}
-
AssertPositiveOrZero(count);
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
@@ -1290,11 +1223,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@@ -1305,16 +1234,8 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
+ DCHECK_EQ(size % 16, 0);
}
void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
@@ -1329,14 +1250,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
void TurboAssembler::DropArguments(const Register& count,
@@ -1378,14 +1292,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}