Diffstat (limited to 'deps/v8/src/arm64/macro-assembler-arm64.cc')
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 388
1 file changed, 149 insertions(+), 239 deletions(-)
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 3869046f74..267bc2151b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -15,6 +15,7 @@
#include "src/frame-constants.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -188,15 +189,14 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
- PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+ PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
- if (rd.Is(csp)) {
+ if (rd.IsSP()) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
Logical(temp, rn, imm_operand, op);
- Mov(csp, temp);
- AssertStackConsistency();
+ Mov(sp, temp);
} else {
Logical(rd, rn, imm_operand, op);
}
@@ -294,7 +294,6 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
- AssertStackConsistency();
}
}
}
@@ -337,7 +336,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// registers is not required to clear the top word of the X register. In
// this case, the instruction is discarded.
//
- // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ // If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
@@ -596,11 +595,8 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn,
}
}
-
-void MacroAssembler::Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond) {
+void TurboAssembler::Csel(const Register& rd, const Register& rn,
+ const Operand& operand, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
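[Promoting Csel from MacroAssembler to TurboAssembler changes only where it lives; the signature is unchanged. A hypothetical call site (the masm pointer is an assumption, not part of the patch):

  // Hypothetical usage sketch: x0 = (flags satisfy eq) ? x1 : x2.
  masm->Csel(x0, x1, Operand(x2), eq);]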
@@ -724,11 +720,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
- if (rd.Is(csp)) {
+ if (rd.Is(sp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
- } else if (rn.Is(csp)) {
+ } else if (rn.Is(sp)) {
mode = kLimitShiftForSP;
}
@@ -1105,9 +1101,9 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
PushPreamble(size);
// Reserve room for src0 and push src1.
- str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
- str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
+ str(src0, MemOperand(sp, src1.SizeInBytes()));
}
@@ -1166,9 +1162,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
- // Push up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in order
- // to maintain the 16-byte alignment for csp.
+ // Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@@ -1183,9 +1177,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- // Pop up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in
- // order to maintain the 16-byte alignment for csp.
+ // Pop up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -1258,23 +1250,23 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
switch (count) {
case 1:
DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
- str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ str(src0, MemOperand(sp, -1 * size, PreIndex));
break;
case 2:
DCHECK(src2.IsNone() && src3.IsNone());
- stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
break;
case 3:
DCHECK(src3.IsNone());
- stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
- str(src0, MemOperand(StackPointer(), 2 * size));
+ stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
+ str(src0, MemOperand(sp, 2 * size));
break;
case 4:
// Skip over 4 * size, then fill in the gap. This allows four W registers
- // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp
// at all times.
- stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
- stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, 2 * size));
break;
default:
UNREACHABLE();
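[In the four-register case above, the claim-then-fill pattern is what keeps sp 16-byte aligned between the two stores. A sketch of the expansion for four W registers, i.e. size == 4 (the masm pointer and the exact scratch-free encodings are assumptions, not part of the patch):

  // masm->Push(w0, w1, w2, w3) emits, per the case 4 branch above:
  //   stp w3, w2, [sp, #-16]!   // claim all 16 bytes, store the high pair
  //   stp w1, w0, [sp, #8]      // fill the 8-byte gap with the low pair
  masm->Push(w0, w1, w2, w3);]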
@@ -1295,24 +1287,24 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
switch (count) {
case 1:
DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
- ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
break;
case 2:
DCHECK(dst2.IsNone() && dst3.IsNone());
- ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
break;
case 3:
DCHECK(dst3.IsNone());
- ldr(dst2, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ ldr(dst2, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
break;
case 4:
// Load the higher addresses first, then load the lower addresses and
// skip the whole block in the second instruction. This allows four W
- // registers to be popped using csp, whilst maintaining 16-byte alignment
- // for csp at all times.
- ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ // registers to be popped using sp, whilst maintaining 16-byte alignment
+ // for sp at all times.
+ ldp(dst2, dst3, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
break;
default:
UNREACHABLE();
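[The pop side mirrors this: read the higher pair at its offset first, then pop the entire block with the second load's post-index writeback. Again a sketch for four W registers (masm assumed, same caveats):

  // masm->Pop(w0, w1, w2, w3) emits, per the case 4 branch above:
  //   ldp w2, w3, [sp, #8]      // load the higher addresses first
  //   ldp w0, w1, [sp], #16     // load the lower pair, then drop all 16 bytes
  masm->Pop(w0, w1, w2, w3);]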
@@ -1322,43 +1314,27 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
void TurboAssembler::PushPreamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else {
- // Even if the current stack pointer is not the system stack pointer (csp),
- // the system stack pointer will still be modified in order to comply with
- // ABI rules about accessing memory below the system stack pointer.
- BumpSystemStackPointer(total_size);
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PopPostamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
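[With the csp/StackPointer() split gone, the 16-byte rule now applies to every push and pop unconditionally. Callers satisfy it by padding odd register counts, as this file does elsewhere with padreg; a hypothetical example (masm assumed):

  // Pushing a single X register alone would misalign sp, so pair it with
  // padreg: 2 * 8 bytes == 16, keeping sp 16-byte aligned.
  masm->Push(x1, padreg);]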
void TurboAssembler::PushPreamble(int count, int size) {
@@ -1376,7 +1352,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Str(src, MemOperand(StackPointer(), offset));
+ Str(src, MemOperand(sp, offset));
}
@@ -1388,14 +1364,14 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Ldr(dst, MemOperand(StackPointer(), offset));
+ Ldr(dst, MemOperand(sp, offset));
}
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
- Stp(src1, src2, MemOperand(StackPointer(), offset));
+ Stp(src1, src2, MemOperand(sp, offset));
}
@@ -1404,7 +1380,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
- Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+ Ldp(dst1, dst2, MemOperand(sp, offset));
}
@@ -1412,11 +1388,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@@ -1436,11 +1408,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+ MemOperand tos(sp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@@ -1455,44 +1423,15 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
-void TurboAssembler::AssertStackConsistency() {
- // Avoid emitting code when !use_real_abort() since non-real aborts cause too
- // much code to be generated.
- if (emit_debug_code() && use_real_aborts()) {
- if (csp.Is(StackPointer())) {
- // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
- // can't check the alignment of csp without using a scratch register (or
- // clobbering the flags), but the processor (or simulator) will abort if
- // it is not properly aligned during a load.
- ldr(xzr, MemOperand(csp, 0));
- }
- if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
- Label ok;
- // Check that csp <= StackPointer(), preserving all registers and NZCV.
- sub(StackPointer(), csp, StackPointer());
- cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
- tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
-
- // Avoid generating AssertStackConsistency checks for the Push in Abort.
- { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
- }
-
- bind(&ok);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- }
- }
-}
-
-void TurboAssembler::AssertCspAligned() {
+void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
- // TODO(titzer): use a real assert for alignment check?
+ // Arm64 requires the stack pointer to be 16-byte aligned prior to address
+ // calculation.
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
- ldr(temp, MemOperand(csp));
+ Mov(temp, sp);
+ Tst(temp, 15);
+ Check(eq, AbortReason::kUnexpectedStackPointer);
}
}
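[The old csp check relied on the hardware (or simulator) faulting on a load from a misaligned stack pointer; the new code tests the invariant explicitly, going through a scratch register because the flag-setting logical instructions cannot read sp directly. In plain C++ the predicate amounts to a low-four-bits test; a minimal sketch with a hypothetical helper name:

  // Hypothetical helper, not part of the patch: a 16-byte-aligned stack
  // pointer has its low four bits clear.
  static inline bool IsSpAligned16(uint64_t sp_value) {
    return (sp_value & 15) == 0;
  }]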
@@ -1568,11 +1507,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
- Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
+ Add(dst, sp, slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
- Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
+ Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@@ -1630,6 +1569,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Swap(Register lhs, Register rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
+void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ VRegister temp = VRegister::no_reg();
+ if (lhs.IsS()) {
+ temp = temps.AcquireS();
+ } else if (lhs.IsD()) {
+ temp = temps.AcquireD();
+ } else {
+ DCHECK(lhs.IsQ());
+ temp = temps.AcquireQ();
+ }
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
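[Both Swap overloads go through a scratch register, and the vector variant sizes the scratch to the operand so a Q-register swap loses no lanes. A hypothetical call site (masm assumed):

  // Exchange two X registers; the scratch is acquired and released inside.
  masm->Swap(x0, x1);]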
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1792,6 +1759,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
+ Mov(kOffHeapTrampolineRegister, bytes_address);
+ Br(kOffHeapTrampolineRegister);
+}
+
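[The trampoline target lives outside the managed heap, so its raw bytes address is materialized as a plain immediate (no relocation entry is recorded) before the register branch. A sketch of the effect (masm assumed):

  // Effectively pc = stream->bytes(); no return address is set up, so this
  // is a tail jump into off-heap code.
  masm->JumpToInstructionStream(stream);]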
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@@ -1927,13 +1900,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- if (rmode == RelocInfo::NONE64) {
+ if (RelocInfo::IsNone(rmode)) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
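[Dropping NONE32 leaves a single reloc-free path, and the 48-bit-address guarantee is what lets the macro-assembler skip the final movk. A sketch of the materialization pattern this relies on (the scratch register and exact encodings are assumptions):

  //   movz temp, #addr[15:0]
  //   movk temp, #addr[31:16], lsl #16
  //   movk temp, #addr[47:32], lsl #32   // bits 63:48 are known to be zero
  //   blr  temp]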
@@ -2009,62 +1979,15 @@ int TurboAssembler::CallSize(Label* target) {
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type) {
- Label on_not_heap_number;
-
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, &on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
-
- Bind(&on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2110,12 +2033,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- Add(src_reg, StackPointer(),
- Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
- Add(src_reg, StackPointer(),
- (callee_args_count.immediate() + 1) * kPointerSize);
+ Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@@ -2145,12 +2066,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
- Cmp(StackPointer(), src_reg);
+ Cmp(sp, src_reg);
B(ne, &loop);
// Leave current frame.
- Mov(StackPointer(), dst_reg);
- AssertStackConsistency();
+ Mov(sp, dst_reg);
}
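[The copy loop above walks both frames downwards until src_reg meets sp, and the final Mov then retires the callee's frame in one step. In C terms (a sketch, ignoring the 16-byte rounding noted above):

  // while (src != sp) { *--dst = *--src; }   // slide the args down
  // sp = dst;                                // leave the current frame]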
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -2224,12 +2144,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ Mov(x4, Operand(debug_is_active));
+ Ldrsb(x4, MemOperand(x4));
+ Cbz(x4, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
+ Cbnz(x4, &call_hook);
+
+ Ldr(x4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(x4, &skip_hook);
+ Ldr(x4, FieldMemOperand(x4, DebugInfo::kFlagsOffset));
+ Tst(x4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
+ B(eq, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
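[The rewritten check adds a per-function path: the hook now also runs when the function itself is flagged for break-at-entry, not only when the global hook flag is set. The control flow above amounts to (a C++-flavoured sketch, names abbreviated):

  // if (debug_is_active &&
  //     (debug_hook_active ||
  //      (shared->HasDebugInfo() &&
  //       (debug_info->flags() & DebugInfo::kBreakAtEntry)))) {
  //   ... call the debug hook under a frame scope ...
  // }]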
@@ -2284,7 +2220,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = x4;
+ Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -2343,16 +2279,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(function, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- LoadObject(x1, function);
- InvokeFunction(x1, expected, actual, flag);
-}
-
void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
@@ -2402,7 +2328,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
- Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@@ -2414,21 +2340,20 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
- Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
- Mov(fp, csp);
+ Mov(fp, sp);
Push(type_reg, padreg);
- // csp[3] : lr
- // csp[2] : fp
- // csp[1] : type
- // csp[0] : for alignment
+ // sp[3] : lr
+ // sp[2] : fp
+ // sp[1] : type
+ // sp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2439,8 +2364,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, StackPointer(),
- TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@@ -2450,15 +2374,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
- Mov(StackPointer(), fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
}
@@ -2493,7 +2414,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
- Mov(fp, StackPointer());
+ Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
@@ -2540,13 +2461,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
- DCHECK(csp.Is(StackPointer()));
-
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
- Add(scratch, csp, kXRegSize);
+ Add(scratch, sp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -2555,8 +2474,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
- DCHECK(csp.Is(StackPointer()));
-
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@@ -2582,8 +2499,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
@@ -2752,7 +2668,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
- // - x31 (csp) because the system stack pointer doesn't need to be included
+ // - x31 (sp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
// This function implements the mapping of register code to index into the
@@ -3052,7 +2968,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
+ DCHECK(!kCallerSaved.IncludesAliasOf(sp));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@@ -3164,12 +3080,6 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bind(&after_data);
}
- // We don't pass any arguments on the stack, but we still need to align the C
- // stack pointer to a 16-byte boundary for PCS compliance.
- if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xF);
- }
-
CallPrintf(arg_count, pcs);
}
@@ -3208,14 +3118,6 @@ void MacroAssembler::Printf(const char * format,
CPURegister arg1,
CPURegister arg2,
CPURegister arg3) {
- // We can only print sp if it is the current stack pointer.
- if (!csp.Is(StackPointer())) {
- DCHECK(!csp.Aliases(arg0));
- DCHECK(!csp.Aliases(arg1));
- DCHECK(!csp.Aliases(arg2));
- DCHECK(!csp.Aliases(arg3));
- }
-
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@@ -3224,8 +3126,8 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(0);
// Preserve all caller-saved registers as well as NZCV.
- // If csp is the stack pointer, PushCPURegList asserts that the size of each
- // list is a multiple of 16 bytes.
+ // PushCPURegList asserts that the size of each list is a multiple of 16
+ // bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
@@ -3241,15 +3143,15 @@ void MacroAssembler::Printf(const char * format,
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
- bool arg0_sp = StackPointer().Aliases(arg0);
- bool arg1_sp = StackPointer().Aliases(arg1);
- bool arg2_sp = StackPointer().Aliases(arg2);
- bool arg3_sp = StackPointer().Aliases(arg3);
+ bool arg0_sp = sp.Aliases(arg0);
+ bool arg1_sp = sp.Aliases(arg1);
+ bool arg2_sp = sp.Aliases(arg2);
+ bool arg3_sp = sp.Aliases(arg3);
if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
// Allocate a register to hold the original stack pointer value, to pass
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
- Add(arg_sp, StackPointer(),
+ Add(arg_sp, sp,
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
@@ -3302,7 +3204,7 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- DCHECK(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, sp));
return result;
}
@@ -3359,6 +3261,14 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
}
}
+void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
+ // We can use adr to load a pc relative location.
+ adr(rd, -pc_offset());
+}
+
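[The single adr works because the operands cancel: adr computes rd = pc + imm, and at this instruction pc equals code_start + pc_offset(). Below it, Mov(kSpeculationPoisonRegister, -1) resets the poison mask to all ones, which the poisoning scheme treats as the neutral, non-poisoned value. A sketch of the adr arithmetic:

  //   rd = pc + (-pc_offset())
  //      = (code_start + pc_offset()) - pc_offset()
  //      = code_start]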
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ Mov(kSpeculationPoisonRegister, -1);
+}
#undef __