Diffstat (limited to 'deps/v8/src/arm64/macro-assembler-arm64.cc')
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc  244
1 file changed, 69 insertions(+), 175 deletions(-)
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 74583523af..b15ab47473 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -20,6 +20,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
@@ -305,23 +306,35 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
if (operand.NeedsRelocation(this)) {
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
+ Address addr = static_cast<Address>(operand.ImmediateValue());
+ ExternalReference reference = bit_cast<ExternalReference>(addr);
+ IndirectLoadExternalReference(rd, reference);
+ return;
+ } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
+ Handle<HeapObject> x(
+ reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
+ IndirectLoadConstant(rd, x);
+ return;
+ }
+ }
+ }
Ldr(dst, operand);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
Mov(dst, operand.ImmediateValue());
-
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Emit a shift instruction if moving a shifted register. This operation
// could also be achieved using an orr instruction (like orn used by Mvn),
// but using a shift instruction makes the disassembly clearer.
EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
-
} else if (operand.IsExtendedRegister()) {
// Emit an extend instruction if moving an extended register. This handles
// extend with post-shift operations, too.
EmitExtendShift(dst, operand.reg(), operand.extend(),
operand.shift_amount());
-
} else {
// Otherwise, emit a register move only if the registers are distinct, or
// if they are not X registers.
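The new branch at the top of Mov() covers isolate-independent code, where a relocated immediate cannot be baked into the instruction stream; the value is fetched through the root register instead. A minimal sketch of that pattern, assuming a hypothetical RootRegisterOffsetForReference() helper (the real IndirectLoadExternalReference/IndirectLoadConstant live in the shared TurboAssembler base, not in this diff):

    // Sketch only: RootRegisterOffsetForReference is a hypothetical helper
    // mapping an ExternalReference to its slot in the isolate's
    // root-relative tables.
    void IndirectLoadSketch(TurboAssembler* tasm, Register dst,
                            ExternalReference reference) {
      intptr_t offset = RootRegisterOffsetForReference(reference);
      // The load is relative to kRootRegister, so it carries no relocation
      // and the emitted code stays isolate-independent.
      tasm->Ldr(dst, MemOperand(kRootRegister, offset));
    }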
@@ -347,16 +360,6 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
}
-void TurboAssembler::Mov(const Register& rd, ExternalReference reference) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(rd, reference);
- return;
- }
- }
- Mov(rd, Operand(reference));
-}
-
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xFF);
@@ -1049,8 +1052,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
- PushPreamble(count, size);
+ DCHECK_EQ(0, (size * count) % 16);
PushHelper(count, size, src0, src1, src2, src3);
}
@@ -1062,8 +1065,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
int size = src0.SizeInBytes();
- PushPreamble(count, size);
+ DCHECK_EQ(0, (size * count) % 16);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
@@ -1078,9 +1081,9 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
PopHelper(count, size, dst0, dst1, dst2, dst3);
- PopPostamble(count, size);
}
void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
@@ -1095,31 +1098,26 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
PopHelper(4, size, dst0, dst1, dst2, dst3);
PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
- PopPostamble(count, size);
}
void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
- PushPreamble(size);
+ DCHECK_EQ(0, size % 16);
// Reserve room for src0 and push src1.
str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
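These Push/Pop hunks all make the same change: the dynamic PushPreamble/PopPostamble checks are replaced by a static DCHECK that a single call pushes or pops a multiple of 16 bytes, the stack-pointer alignment AArch64 mandates. A worked instance of the arithmetic, not taken from the patch:

    // Two X registers: 2 * 8 == 16 bytes, so
    // DCHECK_EQ(0, (size * count) % 16) passes.
    masm->Push(x0, x1);
    // A single X register would push 8 bytes and trip the DCHECK; callers
    // pad with padreg (a register whose value is ignored) instead, as this
    // patch itself does in EnterExitFrame below.
    masm->Push(x0, padreg);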
-
-void MacroAssembler::PushPopQueue::PushQueued(
- PreambleDirective preamble_directive) {
+void MacroAssembler::PushPopQueue::PushQueued() {
+ DCHECK_EQ(0, size_ % 16);
if (queued_.empty()) return;
- if (preamble_directive == WITH_PREAMBLE) {
- masm_->PushPreamble(size_);
- }
-
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
@@ -1141,6 +1139,7 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
+ DCHECK_EQ(0, size_ % 16);
if (queued_.empty()) return;
size_t count = queued_.size();
@@ -1159,14 +1158,13 @@ void MacroAssembler::PushPopQueue::PopQueued() {
batch[0], batch[1], batch[2], batch[3]);
}
- masm_->PopPostamble(size_);
queued_.clear();
}
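For context, the queue collects registers and flushes them in batches of up to four same-sized registers; with the preamble gone, alignment of the queued total is asserted once up front. A hypothetical usage, with method names as they appear in this file:

    MacroAssembler::PushPopQueue queue(masm);
    queue.Queue(x0);
    queue.Queue(x1);
    queue.PushQueued();  // size_ is 16 here, satisfying the new DCHECK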
void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- PushPreamble(registers.Count(), size);
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
// Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
@@ -1181,6 +1179,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
// Pop up to four registers at a time.
while (!registers.IsEmpty()) {
@@ -1192,12 +1191,9 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
int count = count_before - registers.Count();
PopHelper(count, size, dst0, dst1, dst2, dst3);
}
- PopPostamble(registers.Count(), size);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
- PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
@@ -1316,39 +1312,6 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
}
}
-void TurboAssembler::PushPreamble(Operand total_size) {
- if (total_size.IsZero()) return;
-
- // The stack pointer must be aligned to 16 bytes on entry, and the total
- // size of the specified registers must also be a multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
-}
-
-void TurboAssembler::PopPostamble(Operand total_size) {
- if (total_size.IsZero()) return;
-
- // The stack pointer must be aligned to 16 bytes on entry, and the total
- // size of the specified registers must also be a multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
-}
-
-void TurboAssembler::PushPreamble(int count, int size) {
- PushPreamble(count * size);
-}
-void TurboAssembler::PopPostamble(int count, int size) {
- PopPostamble(count * size);
-}
-
void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
@@ -1429,7 +1392,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
void TurboAssembler::AssertSpAligned() {
if (emit_debug_code()) {
- TrapOnAbortScope trap_on_abort_scope(this); // Avoid calls to Abort.
+ HardAbortScope hard_abort(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
@@ -1563,24 +1526,12 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
- Move(result, Handle<HeapObject>::cast(object));
+ Mov(result, Handle<HeapObject>::cast(object));
} else {
Mov(result, Operand(Smi::cast(*object)));
}
}
-void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
-
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(dst, value);
- return;
- }
- }
- Mov(dst, value);
-}
-
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void TurboAssembler::Swap(Register lhs, Register rhs) {
@@ -1717,14 +1668,12 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
BlockPoolsScope scope(this);
#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
+ Label start;
+ Bind(&start);
#endif
Operand operand = Operand::EmbeddedCode(stub);
near_call(operand.heap_object_request());
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize);
-#endif
+ DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallStub(CodeStub* stub) {
@@ -1871,9 +1820,9 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
- Move(destination, kRootRegister);
+ Mov(destination, kRootRegister);
} else {
- Add(destination, kRootRegister, Operand(offset));
+ Add(destination, kRootRegister, offset);
}
}
@@ -1896,7 +1845,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstructionSize;
+ uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstrSize;
Mov(temp, Immediate(imm, rmode));
Br(temp);
}
@@ -1916,8 +1865,8 @@ static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
// address at this point, and needs to be encoded as-is.
if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
offset -= reinterpret_cast<int64_t>(pc);
- DCHECK_EQ(offset % kInstructionSize, 0);
- offset = offset / static_cast<int>(kInstructionSize);
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
}
return offset;
}
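Both of these hunks belong to the kInstructionSize → kInstrSize rename; the offset handed to near branches is counted in instructions, hence the division by kInstrSize after the byte-offset DCHECK. A sketch of the range test such an offset must eventually pass, assuming the usual AArch64 B/BL encoding with a signed 26-bit instruction offset (roughly +-128 MB):

    // Sketch: a near call/jump is encodable iff the instruction-granular
    // offset fits the 26-bit immediate of B/BL.
    bool IsNearCallOffsetSketch(int64_t offset_in_instrs) {
      return is_int26(offset_in_instrs);
    }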
@@ -1950,6 +1899,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
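The hunk is cut off here, but the inlined trampoline presumably continues by loading the builtin's off-heap entry point into scratch and branching to it. A sketch of that shape, with names from V8's embedded-builtins support offered as an assumption rather than a quote of the elided lines:

    // Hypothetical continuation of the inlined off-heap trampoline.
    EmbeddedData d = EmbeddedData::FromBlob();
    Address entry = d.InstructionStartOfBuiltin(builtin_index);
    Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
    Br(scratch);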
@@ -1970,26 +1920,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Register target) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
-
Blr(target);
-
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
-#endif
}
-// TurboAssembler::CallSize is sensitive to changes in this function, as it
-// requires to know how many instructions are used to branch to the target.
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
@@ -1998,17 +1933,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
} else {
IndirectCall(target, rmode);
}
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
-#endif
}
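When the target is out of near-call range, IndirectCall takes over. Its assumed shape, a sketch consistent with the surrounding code rather than the function's actual body:

    // Materialize the absolute target with its relocation mode, then
    // branch-and-link through a scratch register.
    void IndirectCallSketch(TurboAssembler* tasm, Address target,
                            RelocInfo::Mode rmode) {
      UseScratchRegisterScope temps(tasm);
      Register temp = temps.AcquireX();
      tasm->Mov(temp, Immediate(static_cast<int64_t>(target), rmode));
      tasm->Blr(temp);
    }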
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
@@ -2029,6 +1957,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2045,19 +1974,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
} else {
IndirectCall(code.address(), rmode);
}
-
-#ifdef DEBUG
- // Check the size of the code generated.
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode));
-#endif
}
void TurboAssembler::Call(ExternalReference target) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- // Immediate is in charge of setting the relocation mode to
- // EXTERNAL_REFERENCE.
- Mov(temp, Immediate(target));
+ Mov(temp, target);
Call(temp);
}
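With the ExternalReference overload of Mov() moved out of this file (see the deletion near the top of the diff), call sites shrink to a plain Mov that picks between a relocated immediate and an isolate-independent indirect load. For example, as used later in this same patch:

    Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));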
@@ -2078,8 +2000,8 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
BlockPoolsScope scope(this);
#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
+ Label start;
+ Bind(&start);
#endif
// The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
@@ -2091,29 +2013,12 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
movz(temp, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
- DCHECK_EQ(offset % kInstructionSize, 0);
- offset = offset / static_cast<int>(kInstructionSize);
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize);
-#endif
-}
-
-int TurboAssembler::CallSize(Register target) {
- USE(target);
- return kInstructionSize;
-}
-
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
- USE(target);
- return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- USE(code);
- return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
+ DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2536,7 +2441,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
- Move(scratch, CodeObject());
+ Mov(scratch, CodeObject());
Push(scratch, padreg);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
@@ -2810,7 +2715,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::CheckPageFlag(const Register& object,
const Register& scratch, int mask,
Condition cc, Label* condition_met) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
@@ -2822,7 +2727,7 @@ void MacroAssembler::CheckPageFlag(const Register& object,
void TurboAssembler::CheckPageFlagSet(const Register& object,
const Register& scratch, int mask,
Label* if_any_set) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAnySet(scratch, mask, if_any_set);
}
@@ -2830,7 +2735,7 @@ void TurboAssembler::CheckPageFlagSet(const Register& object,
void TurboAssembler::CheckPageFlagClear(const Register& object,
const Register& scratch, int mask,
Label* if_all_clear) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}
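All three page-flag checks rely on heap pages being power-of-two aligned: masking an object's address with ~kPageAlignmentMask yields the base of its MemoryChunk, whose header holds the flags word. A worked example of the mask arithmetic, assuming 512 KB (0x80000-byte) pages for illustration:

    // kPageAlignmentMask == page size - 1 == 0x7FFFF
    //   object           : 0x000034567FEC
    //   ~mask            : 0xFFFFFFF80000  (low 19 bits cleared)
    //   object & ~mask   : 0x000034500000  -> MemoryChunk base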
@@ -2930,8 +2835,8 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter, object_parameter);
Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreRegisters(registers);
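Smi::FromEnum simply tags the enum's integral value as a small integer (it forwards to Smi::FromInt(static_cast<int>(value))), so the stub receives these flags through ordinary tagged parameter slots:

    // e.g. fp_mode == kSaveFPRegs (enum value 1) becomes the tagged Smi 1:
    Mov(fp_mode_parameter, Smi::FromEnum(kSaveFPRegs));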
@@ -3044,38 +2949,26 @@ void TurboAssembler::Abort(AbortReason reason) {
RegList old_tmp_list = TmpList()->list();
TmpList()->Combine(MacroAssembler::DefaultTmpList());
- if (use_real_aborts()) {
- // Avoid infinite recursion; Push contains some assertions that use Abort.
- NoUseRealAbortsScope no_real_aborts(this);
-
- Move(x1, Smi::FromInt(static_cast<int>(reason)));
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- } else {
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- }
- } else {
- // Load the string to pass to Printf.
- Label msg_address;
- Adr(x0, &msg_address);
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ Mov(w0, static_cast<int>(reason));
+ Call(ExternalReference::abort_with_reason());
+ return;
+ }
- // Call Printf directly to report the error.
- CallPrintf();
+ // Avoid infinite recursion; Push contains some assertions that use Abort.
+ HardAbortScope hard_aborts(this);
- // We need a way to stop execution on both the simulator and real hardware,
- // and Unreachable() is the best option.
- Unreachable();
+ Mov(x1, Smi::FromInt(static_cast<int>(reason)));
- // Emit the message string directly in the instruction stream.
- {
- BlockPoolsScope scope(this);
- Bind(&msg_address);
- EmitStringData(GetAbortReason(reason));
- }
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
TmpList()->set_list(old_tmp_list);
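The rewrite flips the old inverted logic (use_real_aborts()/NoUseRealAbortsScope) into the positive pair should_abort_hard()/HardAbortScope already seen in AssertSpAligned above. A minimal usage sketch, assuming the scope merely forces hard aborts for its lifetime:

    {
      // Any Abort() reached in here takes the call-free hard path
      // (Mov(w0, reason) + Call(abort_with_reason)), so the assertions
      // inside Push cannot recurse into the full Abort sequence.
      HardAbortScope hard_aborts(this);
      Push(x1, padreg);
    }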
@@ -3216,7 +3109,8 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
- { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ {
+ InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
hlt(kImmExceptionIsPrintf);
dc32(arg_count); // kPrintfArgCountOffset