path: root/deps/v8/src/arm64
author     Myles Borins <mylesborins@google.com>  2018-04-10 21:39:51 -0400
committer  Myles Borins <mylesborins@google.com>  2018-04-11 13:22:42 -0400
commit     12a1b9b8049462e47181a298120243dc83e81c55 (patch)
tree       8605276308c8b4e3597516961266bae1af57557a /deps/v8/src/arm64
parent     78cd8263354705b767ef8c6a651740efe4931ba0 (diff)
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h            |  40
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc               |  20
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h                |  34
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc              |  38
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h                |  11
-rw-r--r--  deps/v8/src/arm64/decoder-arm64-inl.h              |   6
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc             |   7
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc                  |  20
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.h                   |   6
-rw-r--r--  deps/v8/src/arm64/eh-frame-arm64.cc                |   8
-rw-r--r--  deps/v8/src/arm64/frame-constants-arm64.h          |   6
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h             |   6
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc              |  25
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc   |   9
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h      | 137
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc         | 388
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc               |  60
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h          | 143
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h                |   1
19 files changed, 347 insertions, 618 deletions
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 11c4bbf33f..0c31400d9c 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -13,8 +13,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return true; }
@@ -95,7 +94,7 @@ inline void CPURegList::Remove(int code) {
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
- return csp;
+ return sp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
@@ -105,7 +104,7 @@ inline Register Register::XRegFromCode(unsigned code) {
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
- return wcsp;
+ return wsp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
@@ -198,9 +197,7 @@ inline VRegister CPURegister::Q() const {
template<typename T>
struct ImmediateInitializer {
static const bool kIsIntType = true;
- static inline RelocInfo::Mode rmode_for(T) {
- return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
- }
+ static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
return t;
@@ -211,9 +208,7 @@ struct ImmediateInitializer {
template<>
struct ImmediateInitializer<Smi*> {
static const bool kIsIntType = false;
- static inline RelocInfo::Mode rmode_for(Smi* t) {
- return RelocInfo::NONE64;
- }
+ static inline RelocInfo::Mode rmode_for(Smi* t) { return RelocInfo::NONE; }
static inline int64_t immediate_for(Smi* t) {;
return reinterpret_cast<int64_t>(t);
}
@@ -581,26 +576,23 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
-
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
+ Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICache(isolate(), pc, sizeof(target));
+ // Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -647,7 +639,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -681,28 +673,28 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
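A minimal sketch of a call site under the Isolate*-free patching API shown above; it is not part of this patch, and the variables pc, constant_pool, new_target and reloc_info are placeholders for illustration. Only the signatures come from the hunks above.

  // Hypothetical call site: the Isolate* argument is no longer threaded through.
  Assembler::set_target_address_at(pc, constant_pool, new_target,
                                   SKIP_ICACHE_FLUSH);
  reloc_info.WipeOut();  // likewise takes no Isolate* any more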
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index a031884e1f..52c2e4643f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -181,22 +181,20 @@ uint32_t RelocInfo::embedded_size() const {
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -467,9 +465,6 @@ void ConstPool::Clear() {
bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
- // Constant pool currently does not support 32-bit entries.
- DCHECK(mode != RelocInfo::NONE32);
-
return RelocInfo::IsNone(mode) ||
(mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
}
@@ -2994,6 +2989,8 @@ void Assembler::isb() {
Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}
+void Assembler::csdb() { hint(CSDB); }
+
void Assembler::fmov(const VRegister& vd, double imm) {
if (vd.IsScalar()) {
DCHECK(vd.Is1D());
@@ -4745,6 +4742,9 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // Non-relocatable constants should not end up in the literal pool.
+ DCHECK(!RelocInfo::IsNone(rmode));
+
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 2deae8aaa4..c956c072b7 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -68,7 +68,6 @@ namespace internal {
// clang-format on
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
@@ -455,8 +454,8 @@ constexpr Register no_reg = NoReg;
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
-DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
-DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
+DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
+DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
@@ -994,7 +993,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
@@ -1008,12 +1007,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code,
- Address target);
+ Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@@ -1754,6 +1752,9 @@ class Assembler : public AssemblerBase {
// Instruction synchronization barrier
void isb();
+ // Conditional speculation barrier.
+ void csdb();
+
// Alias for system instructions.
void nop() { hint(NOP); }
@@ -3677,18 +3678,9 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
-
- // This version will flush at destruction.
- PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
- : PatchingAssembler(IsolateData(isolate), start, count) {
- CHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- }
-
- // This version will not flush.
+ // Note that the instruction cache will not be flushed.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
- : Assembler(isolate_data, start, count * kInstructionSize + kGap),
- isolate_(nullptr) {
+ : Assembler(isolate_data, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
@@ -3701,18 +3693,12 @@ class PatchingAssembler : public Assembler {
DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
DCHECK(IsConstPoolEmpty());
- // Flush the Instruction cache.
- size_t length = buffer_size_ - kGap;
- if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
}
// See definition of PatchAdrFar() for details.
static constexpr int kAdrFarPatchableNNops = 2;
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
-
- private:
- Isolate* isolate_;
};
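The new csdb() emitter (and the Csdb() macro added in macro-assembler-arm64-inl.h further down) exposes the AArch64 conditional speculation barrier, encoded as hint #20 per the SystemHint change below. A sketch of the kind of bounds-check hardening it is intended for follows; the clamp-with-Csel pattern and the registers x1 (index) and x2 (length) are assumptions for illustration, not code from this patch.

  // Illustrative hardening sequence using the new barrier.
  __ Cmp(x1, x2);                // bounds check: index against length
  __ Csel(x1, x1, xzr, lo);      // clamp the index to zero on failure
  __ Csdb();                     // barrier: the unclamped value cannot be used
                                 // speculatively past this point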
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 52f92b6af9..07d020880d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
- __ Str(x1, MemOperand(__ StackPointer(), x5));
+ __ Poke(x1, Operand(x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@@ -314,7 +314,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ EnterExitFrame(
save_doubles(), x10, extra_stack_space,
is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
- DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@@ -349,12 +348,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
- // csp[...]: Saved doubles, if saved_doubles is true.
- // csp[32]: Alignment padding, if necessary.
- // csp[24]: Preserved x23 (used for target).
- // csp[16]: Preserved x22 (used for argc).
- // csp[8]: Preserved x21 (used for argv).
- // csp -> csp[0]: Space reserved for the return address.
+ // sp[...]: Saved doubles, if saved_doubles is true.
+ // sp[32]: Alignment padding, if necessary.
+ // sp[24]: Preserved x23 (used for target).
+ // sp[16]: Preserved x22 (used for argc).
+ // sp[8]: Preserved x21 (used for argv).
+ // sp -> sp[0]: Space reserved for the return address.
//
// After a successful call, the exit frame, preserved registers (x21-x23) and
// the arguments (including the receiver) are dropped or popped as
@@ -364,8 +363,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub either throws an exception by jumping to one of
// the exception_returned label.
- DCHECK(csp.Is(__ StackPointer()));
-
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
__ Mov(x1, argv);
@@ -437,7 +434,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
- DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Mov(x0, 0); // argc.
@@ -454,7 +450,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Register scratch = temps.AcquireX();
__ Mov(scratch, Operand(pending_handler_sp_address));
__ Ldr(scratch, MemOperand(scratch));
- __ Mov(csp, scratch);
+ __ Mov(sp, scratch);
}
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@@ -466,6 +462,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
@@ -511,7 +513,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
- __ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
+ __ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@@ -582,7 +584,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- __ Mov(scratch, __ StackPointer());
+ __ Mov(scratch, sp);
__ Str(scratch, MemOperand(x11));
}
@@ -740,10 +742,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- // Make sure the caller configured the stack pointer (see comment in
- // DirectCEntryStub::Generate).
- DCHECK(csp.Is(__ StackPointer()));
-
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
@@ -1260,7 +1258,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Prepare arguments.
Register args = x6;
- __ Mov(args, masm->StackPointer());
+ __ Mov(args, sp);
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
@@ -1344,7 +1342,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Mov(x0, sp); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index b02dd5d2d7..406b139a50 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -407,12 +407,13 @@ enum Extend {
};
enum SystemHint {
- NOP = 0,
+ NOP = 0,
YIELD = 1,
- WFE = 2,
- WFI = 3,
- SEV = 4,
- SEVL = 5
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5,
+ CSDB = 20
};
enum BarrierDomain {
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 55a09dc1c5..201dfaa423 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -168,11 +168,6 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
- (instr->Mask(0x003FFFC0) == 0x000320C0) ||
- (instr->Mask(0x003FF100) == 0x00032100) ||
- (instr->Mask(0x003FF200) == 0x00032200) ||
- (instr->Mask(0x003FF400) == 0x00032400) ||
- (instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
V::VisitUnallocated(instr);
@@ -467,6 +462,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
break;
}
+ V8_FALLTHROUGH;
}
case 1:
case 3:
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 8269e8e50a..a81621b6a9 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -33,7 +33,7 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
- masm->Add(src, masm->StackPointer(), src_offset);
+ masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
@@ -140,8 +140,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
- __ Add(fp_to_sp, __ StackPointer(),
- kSavedRegistersAreaSize + (2 * kPointerSize));
+ __ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@@ -222,7 +221,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
- __ Mov(__ StackPointer(), scratch);
+ __ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 41c654b214..d344903d59 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -968,7 +968,7 @@ void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
switch (instr->Mask(FPCompareMask)) {
case FCMP_s_zero:
- case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_d_zero: form = form_zero; V8_FALLTHROUGH;
case FCMP_s:
case FCMP_d: mnemonic = "fcmp"; break;
default: form = "(FPCompare)";
@@ -1246,6 +1246,11 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
form = nullptr;
break;
}
+ case CSDB: {
+ mnemonic = "csdb";
+ form = nullptr;
+ break;
+ }
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
switch (instr->Mask(MemBarrierMask)) {
@@ -3327,7 +3332,7 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
}
}
- if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) {
+ if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
@@ -3339,9 +3344,9 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
AppendToOutput("%c%d", reg_char, reg.code());
}
- } else if (reg.Aliases(csp)) {
- // Disassemble w31/x31 as stack pointer wcsp/csp.
- AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp");
+ } else if (reg.Aliases(sp)) {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
} else {
// Disassemble w31/x31 as zero register wzr/xzr.
AppendToOutput("%czr", reg_char);
@@ -3713,6 +3718,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
return 0;
}
+ UNIMPLEMENTED();
+ return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@@ -3836,7 +3843,8 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
switch (format[1]) {
case 'D': { // NDP.
DCHECK(instr->ShiftDP() != ROR);
- } // Fall through.
+ V8_FALLTHROUGH;
+ }
case 'L': { // NLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index c12d53b7e6..0edb2ea583 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DISASM_ARM64_H
-#define V8_ARM64_DISASM_ARM64_H
+#ifndef V8_ARM64_DISASM_ARM64_H_
+#define V8_ARM64_DISASM_ARM64_H_
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@@ -96,4 +96,4 @@ class PrintDisassembler : public DisassemblingDecoder {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_DISASM_ARM64_H
+#endif // V8_ARM64_DISASM_ARM64_H_
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index 48909d5b2d..79d8510f9b 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -11,7 +11,7 @@ namespace internal {
static const int kX0DwarfCode = 0;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
-static const int kCSpDwarfCode = 31;
+static const int kSpDwarfCode = 31;
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -8;
@@ -33,7 +33,7 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
case kRegCode_x30:
return kLrDwarfCode;
case kSPRegInternalCode:
- return kCSpDwarfCode;
+ return kSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
default:
@@ -51,8 +51,8 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
- case kCSpDwarfCode:
- return "csp"; // This could be zr as well
+ case kSpDwarfCode:
+ return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
return nullptr;
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index a337079786..00ac99d1be 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_FRAMES_ARM64_H_
-#define V8_ARM64_FRAMES_ARM64_H_
+#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
+#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
namespace v8 {
namespace internal {
@@ -61,4 +61,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_FRAMES_ARM64_H_
+#endif // V8_ARM64_FRAME_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 0cc3e803d0..499023ebb2 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -258,7 +258,7 @@ class Instruction {
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
- // The following instructions use csp or wsp as Rd:
+ // The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
@@ -272,7 +272,7 @@ class Instruction {
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
- // can set the flags. The others can all write into csp.
+ // can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
@@ -287,7 +287,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
- // The following instructions use csp or wsp as Rn:
+ // The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 8e9cce7197..f9550782c1 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -91,7 +91,6 @@ static const CounterDescriptor kCounterList[] = {
{"PC Addressing", Gauge},
{"Other", Gauge},
- {"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
@@ -238,16 +237,8 @@ void Instrument::VisitPCRelAddressing(Instruction* instr) {
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
}
@@ -470,16 +461,8 @@ void Instrument::VisitAddSubShifted(Instruction* instr) {
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
}
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 17b058bd01..bcbe5d97dc 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -69,15 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function info
- // x2: feedback vector
- // x3: slot
- Register registers[] = {x1, x2, x3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 0861551d89..f96d4b20b8 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -404,8 +404,7 @@ void MacroAssembler::CzeroX(const Register& rd,
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
-void MacroAssembler::CmovX(const Register& rd,
- const Register& rn,
+void TurboAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
@@ -416,6 +415,11 @@ void MacroAssembler::CmovX(const Register& rd,
}
}
+void TurboAssembler::Csdb() {
+ DCHECK(allow_macro_instructions());
+ csdb();
+}
+
void TurboAssembler::Cset(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -423,8 +427,7 @@ void TurboAssembler::Cset(const Register& rd, Condition cond) {
cset(rd, cond);
}
-
-void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+void TurboAssembler::Csetm(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -461,14 +464,12 @@ void MacroAssembler::Csneg(const Register& rd,
csneg(rd, rn, rm, cond);
}
-
-void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dmb(domain, type);
}
-
-void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dsb(domain, type);
}
@@ -651,10 +652,12 @@ void TurboAssembler::Fmov(VRegister vd, double imm) {
if (bits == 0) {
fmov(vd, xzr);
} else {
- Ldr(vd, imm);
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, bits);
+ fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -678,12 +681,10 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
- // TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
Mov(tmp, bit_cast<uint32_t>(imm));
Fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -737,8 +738,7 @@ void MacroAssembler::Hlt(int code) {
hlt(code);
}
-
-void MacroAssembler::Isb() {
+void TurboAssembler::Isb() {
DCHECK(allow_macro_instructions());
isb();
}
@@ -748,12 +748,6 @@ void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
ldr(rt, operand);
}
-void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
- DCHECK(allow_macro_instructions());
- DCHECK(rt.Is64Bits());
- ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
-}
-
void TurboAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
@@ -1042,58 +1036,6 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
-void MacroAssembler::AlignAndSetCSPForFrame() {
- int sp_alignment = ActivationFrameAlignment();
- // AAPCS64 mandates at least 16-byte alignment.
- DCHECK_GE(sp_alignment, 16);
- DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
- Bic(csp, StackPointer(), sp_alignment - 1);
-}
-
-void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
- DCHECK(!csp.Is(StackPointer()));
- if (!TmpList()->IsEmpty()) {
- Sub(csp, StackPointer(), space);
- } else {
- // TODO(jbramley): Several callers rely on this not using scratch
- // registers, so we use the assembler directly here. However, this means
- // that large immediate values of 'space' cannot be handled cleanly. (Only
- // 24-bits immediates or values of 'space' that can be encoded in one
- // instruction are accepted.) Once we implement our flexible scratch
- // register idea, we could greatly simplify this function.
- InstructionAccurateScope scope(this);
- DCHECK(space.IsImmediate());
- // Align to 16 bytes.
- uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
- DCHECK(is_uint24(imm));
-
- Register source = StackPointer();
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- bic(csp, source, 0xf);
- source = csp;
- }
- if (!is_uint12(imm)) {
- int64_t imm_top_12_bits = imm >> 12;
- sub(csp, source, imm_top_12_bits << 12);
- source = csp;
- imm -= imm_top_12_bits << 12;
- }
- if (imm > 0) {
- sub(csp, source, imm);
- }
- }
- AssertStackConsistency();
-}
-
-void TurboAssembler::SyncSystemStackPointer() {
- DCHECK(emit_debug_code());
- DCHECK(!csp.Is(StackPointer()));
- { InstructionAccurateScope scope(this);
- mov(csp, StackPointer());
- }
- AssertStackConsistency();
-}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@@ -1249,14 +1191,9 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
if (size == 0) {
return;
}
+ DCHECK_EQ(size % 16, 0);
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
@@ -1269,13 +1206,9 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (size.IsZero()) {
return;
}
-
AssertPositiveOrZero(count);
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
@@ -1290,11 +1223,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@@ -1305,16 +1234,8 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
+ DCHECK_EQ(size % 16, 0);
}
void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
@@ -1329,14 +1250,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
void TurboAssembler::DropArguments(const Register& count,
@@ -1378,14 +1292,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
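One consequence of the Fmov change in the hunks above, sketched here as an assumption rather than taken from the patch: a non-zero double immediate is now synthesised through an integer scratch register instead of being loaded from the literal pool.

  // Illustrative only: per the hunk above, this now expands to roughly
  //   Mov(scratch, bit_cast<uint64_t>(1.5)); fmov(d0, scratch);
  // rather than an ldr from the constant pool.
  __ Fmov(d0, 1.5);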
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 3869046f74..267bc2151b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -15,6 +15,7 @@
#include "src/frame-constants.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -188,15 +189,14 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
- PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+ PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
- if (rd.Is(csp)) {
+ if (rd.IsSP()) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
Logical(temp, rn, imm_operand, op);
- Mov(csp, temp);
- AssertStackConsistency();
+ Mov(sp, temp);
} else {
Logical(rd, rn, imm_operand, op);
}
@@ -294,7 +294,6 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
- AssertStackConsistency();
}
}
}
@@ -337,7 +336,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// registers is not required to clear the top word of the X register. In
// this case, the instruction is discarded.
//
- // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ // If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
@@ -596,11 +595,8 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn,
}
}
-
-void MacroAssembler::Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond) {
+void TurboAssembler::Csel(const Register& rd, const Register& rn,
+ const Operand& operand, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -724,11 +720,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
- if (rd.Is(csp)) {
+ if (rd.Is(sp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
- } else if (rn.Is(csp)) {
+ } else if (rn.Is(sp)) {
mode = kLimitShiftForSP;
}
@@ -1105,9 +1101,9 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
PushPreamble(size);
// Reserve room for src0 and push src1.
- str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
- str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
+ str(src0, MemOperand(sp, src1.SizeInBytes()));
}
@@ -1166,9 +1162,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
- // Push up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in order
- // to maintain the 16-byte alignment for csp.
+ // Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@@ -1183,9 +1177,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- // Pop up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in
- // order to maintain the 16-byte alignment for csp.
+ // Pop up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -1258,23 +1250,23 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
switch (count) {
case 1:
DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
- str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ str(src0, MemOperand(sp, -1 * size, PreIndex));
break;
case 2:
DCHECK(src2.IsNone() && src3.IsNone());
- stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
break;
case 3:
DCHECK(src3.IsNone());
- stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
- str(src0, MemOperand(StackPointer(), 2 * size));
+ stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
+ str(src0, MemOperand(sp, 2 * size));
break;
case 4:
// Skip over 4 * size, then fill in the gap. This allows four W registers
- // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp
// at all times.
- stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
- stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, 2 * size));
break;
default:
UNREACHABLE();
@@ -1295,24 +1287,24 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
switch (count) {
case 1:
DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
- ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
break;
case 2:
DCHECK(dst2.IsNone() && dst3.IsNone());
- ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
break;
case 3:
DCHECK(dst3.IsNone());
- ldr(dst2, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ ldr(dst2, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
break;
case 4:
// Load the higher addresses first, then load the lower addresses and
// skip the whole block in the second instruction. This allows four W
- // registers to be popped using csp, whilst maintaining 16-byte alignment
- // for csp at all times.
- ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ // registers to be popped using sp, whilst maintaining 16-byte alignment
+ // for sp at all times.
+ ldp(dst2, dst3, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
break;
default:
UNREACHABLE();
@@ -1322,43 +1314,27 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
void TurboAssembler::PushPreamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else {
- // Even if the current stack pointer is not the system stack pointer (csp),
- // the system stack pointer will still be modified in order to comply with
- // ABI rules about accessing memory below the system stack pointer.
- BumpSystemStackPointer(total_size);
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PopPostamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PushPreamble(int count, int size) {
@@ -1376,7 +1352,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Str(src, MemOperand(StackPointer(), offset));
+ Str(src, MemOperand(sp, offset));
}
@@ -1388,14 +1364,14 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Ldr(dst, MemOperand(StackPointer(), offset));
+ Ldr(dst, MemOperand(sp, offset));
}
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
- Stp(src1, src2, MemOperand(StackPointer(), offset));
+ Stp(src1, src2, MemOperand(sp, offset));
}
@@ -1404,7 +1380,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
- Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+ Ldp(dst1, dst2, MemOperand(sp, offset));
}
@@ -1412,11 +1388,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@@ -1436,11 +1408,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+ MemOperand tos(sp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@@ -1455,44 +1423,15 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
-void TurboAssembler::AssertStackConsistency() {
- // Avoid emitting code when !use_real_abort() since non-real aborts cause too
- // much code to be generated.
- if (emit_debug_code() && use_real_aborts()) {
- if (csp.Is(StackPointer())) {
- // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
- // can't check the alignment of csp without using a scratch register (or
- // clobbering the flags), but the processor (or simulator) will abort if
- // it is not properly aligned during a load.
- ldr(xzr, MemOperand(csp, 0));
- }
- if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
- Label ok;
- // Check that csp <= StackPointer(), preserving all registers and NZCV.
- sub(StackPointer(), csp, StackPointer());
- cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
- tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
-
- // Avoid generating AssertStackConsistency checks for the Push in Abort.
- { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
- }
-
- bind(&ok);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- }
- }
-}
-
-void TurboAssembler::AssertCspAligned() {
+void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
- // TODO(titzer): use a real assert for alignment check?
+ // Arm64 requires the stack pointer to be 16-byte aligned prior to address
+ // calculation.
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
- ldr(temp, MemOperand(csp));
+ Mov(temp, sp);
+ Tst(temp, 15);
+ Check(eq, AbortReason::kUnexpectedStackPointer);
}
}
@@ -1568,11 +1507,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
- Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
+ Add(dst, sp, slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
- Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
+ Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@@ -1630,6 +1569,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Swap(Register lhs, Register rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
+void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ VRegister temp = VRegister::no_reg();
+ if (lhs.IsS()) {
+ temp = temps.AcquireS();
+ } else if (lhs.IsD()) {
+ temp = temps.AcquireD();
+ } else {
+ DCHECK(lhs.IsQ());
+ temp = temps.AcquireQ();
+ }
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1792,6 +1759,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
+ Mov(kOffHeapTrampolineRegister, bytes_address);
+ Br(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@@ -1927,13 +1900,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- if (rmode == RelocInfo::NONE64) {
+ if (RelocInfo::IsNone(rmode)) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
@@ -2009,62 +1979,15 @@ int TurboAssembler::CallSize(Label* target) {
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type) {
- Label on_not_heap_number;
-
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, &on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
-
- Bind(&on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2110,12 +2033,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- Add(src_reg, StackPointer(),
- Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
- Add(src_reg, StackPointer(),
- (callee_args_count.immediate() + 1) * kPointerSize);
+ Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@@ -2145,12 +2066,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
- Cmp(StackPointer(), src_reg);
+ Cmp(sp, src_reg);
B(ne, &loop);
// Leave current frame.
- Mov(StackPointer(), dst_reg);
- AssertStackConsistency();
+ Mov(sp, dst_reg);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -2224,12 +2144,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ Mov(x4, Operand(debug_is_active));
+ Ldrsb(x4, MemOperand(x4));
+ Cbz(x4, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
+ Cbnz(x4, &call_hook);
+
+ Ldr(x4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(x4, &skip_hook);
+ Ldr(x4, FieldMemOperand(x4, DebugInfo::kFlagsOffset));
+ Tst(x4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
+ B(eq, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
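The new hook dispatch reads two external flags and, when only the debugger is active, falls back to the function's DebugInfo flags. A hedged, standalone C++ restatement of that control flow; the parameter names mirror the external references and flag used above, and the helper itself is hypothetical:

  // Illustrative only; mirrors the Cbz/Cbnz/JumpIfSmi/Tst sequence emitted above.
  bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_function_call,
                           bool has_debug_info, int debug_info_flags,
                           int break_at_entry_flag) {
    if (!debug_is_active) return false;       // Cbz(x4, &skip_hook)
    if (hook_on_function_call) return true;   // Cbnz(x4, &call_hook)
    if (!has_debug_info) return false;        // JumpIfSmi(x4, &skip_hook)
    return (debug_info_flags & break_at_entry_flag) != 0;  // Tst + B(eq, &skip_hook)
  }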
@@ -2284,7 +2220,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = x4;
+ Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -2343,16 +2279,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(function, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- LoadObject(x1, function);
- InvokeFunction(x1, expected, actual, flag);
-}
-
void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
@@ -2402,7 +2328,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
- Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@@ -2414,21 +2340,20 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
- Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
- Mov(fp, csp);
+ Mov(fp, sp);
Push(type_reg, padreg);
- // csp[3] : lr
- // csp[2] : fp
- // csp[1] : type
- // csp[0] : for alignment
+ // sp[3] : lr
+ // sp[2] : fp
+ // sp[1] : type
+ // sp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2439,8 +2364,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, StackPointer(),
- TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@@ -2450,15 +2374,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
- Mov(StackPointer(), fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
}
@@ -2493,7 +2414,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
- Mov(fp, StackPointer());
+ Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
@@ -2540,13 +2461,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
- DCHECK(csp.Is(StackPointer()));
-
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
- Add(scratch, csp, kXRegSize);
+ Add(scratch, sp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -2555,8 +2474,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
- DCHECK(csp.Is(StackPointer()));
-
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@@ -2582,8 +2499,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
@@ -2752,7 +2668,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
- // - x31 (csp) because the system stack pointer doesn't need to be included
+ // - x31 (sp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
// This function implements the mapping of register code to index into the
@@ -3052,7 +2968,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
+ DCHECK(!kCallerSaved.IncludesAliasOf(sp));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@@ -3164,12 +3080,6 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bind(&after_data);
}
- // We don't pass any arguments on the stack, but we still need to align the C
- // stack pointer to a 16-byte boundary for PCS compliance.
- if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xF);
- }
-
CallPrintf(arg_count, pcs);
}
@@ -3208,14 +3118,6 @@ void MacroAssembler::Printf(const char * format,
CPURegister arg1,
CPURegister arg2,
CPURegister arg3) {
- // We can only print sp if it is the current stack pointer.
- if (!csp.Is(StackPointer())) {
- DCHECK(!csp.Aliases(arg0));
- DCHECK(!csp.Aliases(arg1));
- DCHECK(!csp.Aliases(arg2));
- DCHECK(!csp.Aliases(arg3));
- }
-
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@@ -3224,8 +3126,8 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(0);
// Preserve all caller-saved registers as well as NZCV.
- // If csp is the stack pointer, PushCPURegList asserts that the size of each
- // list is a multiple of 16 bytes.
+ // PushCPURegList asserts that the size of each list is a multiple of 16
+ // bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
@@ -3241,15 +3143,15 @@ void MacroAssembler::Printf(const char * format,
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
- bool arg0_sp = StackPointer().Aliases(arg0);
- bool arg1_sp = StackPointer().Aliases(arg1);
- bool arg2_sp = StackPointer().Aliases(arg2);
- bool arg3_sp = StackPointer().Aliases(arg3);
+ bool arg0_sp = sp.Aliases(arg0);
+ bool arg1_sp = sp.Aliases(arg1);
+ bool arg2_sp = sp.Aliases(arg2);
+ bool arg3_sp = sp.Aliases(arg3);
if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
// Allocate a register to hold the original stack pointer value, to pass
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
- Add(arg_sp, StackPointer(),
+ Add(arg_sp, sp,
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
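Because the caller-saved general and vector register lists have just been pushed, the live sp is lower than the caller's by their combined size, so an sp-aliased argument is rebased before the call. A minimal sketch of the adjustment, assuming the TotalSizeInBytes() values are the pushed byte counts; the helper is hypothetical:

  // Restates Add(arg_sp, sp, ...) above; illustrative only.
  inline uint64_t CallerSpForPrintf(uint64_t sp, uint64_t caller_saved_bytes,
                                    uint64_t caller_saved_v_bytes) {
    return sp + caller_saved_bytes + caller_saved_v_bytes;
  }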
@@ -3302,7 +3204,7 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- DCHECK(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, sp));
return result;
}
@@ -3359,6 +3261,14 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
}
}
+void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
+ // We can use adr to load a pc relative location.
+ adr(rd, -pc_offset());
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ Mov(kSpeculationPoisonRegister, -1);
+}
#undef __
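ComputeCodeStartAddress relies on pc_offset() being the byte offset of the instruction being emitted from the start of the buffer, so an adr with displacement -pc_offset() resolves to the start of the generated instruction stream. ResetSpeculationPoisonRegister moves -1 (all ones) into the poison register, presumably so that masking a value with it is a no-op until a real poison mask is computed. A hedged sketch of the address arithmetic only:

  // Illustrative: code_start = pc_of_adr + (-pc_offset_of_adr).
  inline uint64_t CodeStartFromAdr(uint64_t adr_pc, int adr_pc_offset) {
    return adr_pc - static_cast<uint64_t>(adr_pc_offset);
  }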
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 47c08f2622..c72cb39536 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -47,12 +47,15 @@ namespace internal {
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
+#define kSpeculationPoisonRegister x18
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
+#define kJavaScriptCallCodeStartRegister x2
#define kJavaScriptCallNewTargetRegister x3
+#define kOffHeapTrampolineRegister ip0
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@@ -254,6 +257,10 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> x);
void Move(Register dst, Smi* src);
+ // Register swap. Note that the register operands should be distinct.
+ void Swap(Register lhs, Register rhs);
+ void Swap(VRegister lhs, VRegister rhs);
+
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmla, Fmla) \
@@ -549,6 +556,11 @@ class TurboAssembler : public Assembler {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Isb();
+ inline void Csdb();
+
bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -581,20 +593,6 @@ class TurboAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
- // If emit_debug_code() is true, emit a run-time check to ensure that
- // StackPointer() does not point below the system stack pointer.
- //
- // Whilst it is architecturally legal for StackPointer() to point below csp,
- // it can be evidence of a potential bug because the ABI forbids accesses
- // below csp.
- //
- // If StackPointer() is the system stack pointer (csp), then csp will be
- // dereferenced to cause the processor (or simulator) to abort if it is not
- // properly aligned.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertStackConsistency();
-
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
@@ -614,9 +612,6 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
- // Return the stack pointer.
- inline const Register& StackPointer() const { return csp; }
-
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
void Mvn(const Register& rd, const Operand& operand);
@@ -650,9 +645,11 @@ class TurboAssembler : public Assembler {
inline void Cmp(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
+ void Csel(const Register& rd, const Register& rn, const Operand& operand,
+ Condition cond);
- // Emits a runtime assert that the CSP is aligned.
- void AssertCspAligned();
+ // Emits a runtime assert that the stack pointer is aligned.
+ void AssertSpAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
@@ -687,17 +684,14 @@ class TurboAssembler : public Assembler {
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
- // Helper function for double immediate.
- inline void Ldr(const CPURegister& rt, double imm);
// Claim or drop stack space without actually accessing memory.
//
// In debug mode, both of these will write invalid data into the claimed or
// dropped space.
//
- // If the current stack pointer (according to StackPointer()) is csp, then it
- // must be aligned to 16 bytes and the size claimed or dropped must be a
- // multiple of 16 bytes.
+ // The stack pointer must be aligned to 16 bytes and the size claimed or
+ // dropped must be a multiple of 16 bytes.
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
@@ -724,26 +718,6 @@ class TurboAssembler : public Assembler {
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
- // Re-synchronizes the system stack pointer (csp) with the current stack
- // pointer (according to StackPointer()).
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void SyncSystemStackPointer();
-
- // Push the system stack pointer (csp) down to allow the same to be done to
- // the current stack pointer (according to StackPointer()). This must be
- // called _before_ accessing the memory.
- //
- // This is necessary when pushing or otherwise adding things to the stack, to
- // satisfy the AAPCS64 constraint that the memory below the system stack
- // pointer is not accessed. The amount pushed will be increased as necessary
- // to ensure csp remains aligned to 16 bytes.
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void BumpSystemStackPointer(const Operand& space);
-
// Add and sub macros.
inline void Add(const Register& rd, const Register& rn,
const Operand& operand);
@@ -778,11 +752,6 @@ class TurboAssembler : public Assembler {
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
- // Even if the current stack pointer is not the system stack pointer (csp),
- // Push (and derived methods) will still modify the system stack pointer in
- // order to comply with ABI rules about accessing memory below the system
- // stack pointer.
- //
// Other than the registers passed into Pop, the stack pointer and (possibly)
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
@@ -1011,17 +980,13 @@ class TurboAssembler : public Assembler {
inline void Clz(const Register& rd, const Register& rn);
- // Poke 'src' onto the stack. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
+ // be 16 byte aligned.
void Poke(const CPURegister& src, const Operand& offset);
// Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
- // with 'src2' at a higher address than 'src1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
+ // stack pointer must be 16 byte aligned.
void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
@@ -1047,7 +1012,9 @@ class TurboAssembler : public Assembler {
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
Condition cond);
inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
@@ -1233,6 +1200,12 @@ class TurboAssembler : public Assembler {
inline void Fcvtas(const Register& rd, const VRegister& fn);
inline void Fcvtau(const Register& rd, const VRegister& fn);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(const Register& rd);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1257,8 +1230,8 @@ class TurboAssembler : public Assembler {
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
- // arguments and stack (csp) must be prepared by the caller as for a normal
- // AAPCS64 call to 'printf'.
+ // arguments and stack must be prepared by the caller as for a normal AAPCS64
+ // call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
@@ -1326,8 +1299,6 @@ class MacroAssembler : public TurboAssembler {
inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
- void Csel(const Register& rd, const Register& rn, const Operand& operand,
- Condition cond);
#define DECLARE_FUNCTION(FN, OP) \
inline void FN(const Register& rs, const Register& rt, const Register& rn);
@@ -1344,14 +1315,10 @@ class MacroAssembler : public TurboAssembler {
inline void Cinc(const Register& rd, const Register& rn, Condition cond);
inline void Cinv(const Register& rd, const Register& rn, Condition cond);
inline void CzeroX(const Register& rd, Condition cond);
- inline void CmovX(const Register& rd, const Register& rn, Condition cond);
- inline void Csetm(const Register& rd, Condition cond);
inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
- inline void Dmb(BarrierDomain domain, BarrierType type);
- inline void Dsb(BarrierDomain domain, BarrierType type);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
inline void Fcsel(const VRegister& fd, const VRegister& fn,
@@ -1394,7 +1361,6 @@ class MacroAssembler : public TurboAssembler {
const VRegister& fm, const VRegister& fa);
inline void Hint(SystemHint code);
inline void Hlt(int code);
- inline void Isb();
inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
@@ -1641,17 +1607,13 @@ class MacroAssembler : public TurboAssembler {
};
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // The stack pointer must be aligned to 16 bytes.
void Peek(const CPURegister& dst, const Operand& offset);
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
- // higher address than 'dst1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // higher address than 'dst1'. The offset is in bytes. The stack pointer must
+ // be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
@@ -1704,10 +1666,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
void PopCalleeSavedRegisters();
- // Align csp for a frame, as per ActivationFrameAlignment, and make it the
- // current stack pointer.
- inline void AlignAndSetCSPForFrame();
-
// Helpers ------------------------------------------------------------------
static int SafepointRegisterStackIndex(int reg_code);
@@ -1770,11 +1728,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
- void JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
- void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
- SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
-
// Try to represent a double as a signed 64-bit int.
// This succeeds if the result compares equal to the input, so inputs of -0.0
// are represented as 0 and handled as a success.
@@ -1817,6 +1770,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1841,9 +1797,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
// ---- Code generation helpers ----
@@ -1940,12 +1893,12 @@ class MacroAssembler : public TurboAssembler {
// Set up a stack frame and registers as follows:
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: SPOffset (new csp)
+ // fp[-8]: SPOffset (new sp)
// fp[-16]: CodeObject()
// fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
- // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // sp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
+ // sp -> sp[0]: Space reserved for the return address.
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
@@ -1960,8 +1913,6 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
- //
- // The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles, const Register& scratch,
const Register& scratch2);
@@ -2030,11 +1981,6 @@ class MacroAssembler : public TurboAssembler {
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
- // At the moment it is only possible to print the value of csp if it is the
- // current stack pointer. Otherwise, the MacroAssembler will automatically
- // update csp on every push (using BumpSystemStackPointer), so determining its
- // value is difficult.
- //
// Format placeholders that refer to more than one argument, or to a specific
// argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
@@ -2169,6 +2115,7 @@ class UseScratchRegisterScope {
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
VRegister AcquireV(VectorFormat format) {
return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
}
@@ -2210,7 +2157,7 @@ class InlineSmiCheckInfo {
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
- // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
+ // indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
//
// The generated patch information can be read using the InlineSMICheckInfo
// class.
@@ -2230,8 +2177,8 @@ class InlineSmiCheckInfo {
// Fields in the data encoded by InlineData.
- // A width of 5 (Rd_width) for the SMI register preclues the use of csp,
- // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
+ // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
// used in a patchable check. The Emit() method checks this.
//
// Note that the total size of the fields is restricted by the underlying
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index d0c464dfbe..5c72cf1c90 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -626,16 +626,15 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
const char* Simulator::xreg_names[] = {
- "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
- "x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
- "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
- "cp", "x28", "fp", "lr", "xzr", "csp"};
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
+ "x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x25", "x26", "cp", "x28", "fp", "lr", "xzr", "sp"};
const char* Simulator::wreg_names[] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
"w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
"w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
- "wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
+ "wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -768,7 +767,7 @@ int Simulator::CodeFromName(const char* name) {
return i;
}
}
- if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
return kSPRegInternalCode;
}
return -1;
@@ -1450,7 +1449,7 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case BL:
set_lr(instr->following());
- // Fall through.
+ V8_FALLTHROUGH;
case B:
set_pc(instr->ImmPCOffsetTarget());
break;
@@ -1478,7 +1477,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
// this, but if we do trap to allow debugging.
Debug();
}
- // Fall through.
+ V8_FALLTHROUGH;
}
case BR:
case RET: set_pc(target); break;
@@ -1630,7 +1629,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
// Switch on the logical operation, stripping out the NOT bit, as it has a
// different meaning for logical immediate instructions.
switch (instr->Mask(LogicalOpMask & ~NOT)) {
- case ANDS: update_flags = true; // Fall through.
+ case ANDS: update_flags = true; V8_FALLTHROUGH;
case AND: result = op1 & op2; break;
case ORR: result = op1 | op2; break;
case EOR: result = op1 ^ op2; break;
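These hunks replace "// Fall through." comments with the V8_FALLTHROUGH macro so the intent is visible to compilers that warn on implicit fall-through. Roughly the usual pattern; the real definition lives in include/v8config.h and may differ:

  // Sketch of a fall-through annotation macro; not the exact V8 definition.
  #if defined(__clang__)
  #define V8_FALLTHROUGH [[clang::fallthrough]]
  #else
  #define V8_FALLTHROUGH
  #endif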
@@ -2956,7 +2955,9 @@ void Simulator::VisitSystem(Instruction* instr) {
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
- case NOP: break;
+ case NOP:
+ case CSDB:
+ break;
default: UNIMPLEMENTED();
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
@@ -2996,15 +2997,15 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
bool Simulator::PrintValue(const char* desc) {
- if (strcmp(desc, "csp") == 0) {
+ if (strcmp(desc, "sp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
+ clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
- } else if (strcmp(desc, "wcsp") == 0) {
+ } else if (strcmp(desc, "wsp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
- clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
+ clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
}
@@ -4396,15 +4397,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_LD1_4v:
case NEON_LD1_4v_post:
ld1(vf, vreg(reg[3]), addr[3]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_3v:
case NEON_LD1_3v_post:
ld1(vf, vreg(reg[2]), addr[2]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_2v:
case NEON_LD1_2v_post:
ld1(vf, vreg(reg[1]), addr[1]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_1v:
case NEON_LD1_1v_post:
ld1(vf, vreg(reg[0]), addr[0]);
@@ -4412,15 +4416,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_ST1_4v:
case NEON_ST1_4v_post:
st1(vf, vreg(reg[3]), addr[3]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_3v:
case NEON_ST1_3v_post:
st1(vf, vreg(reg[2]), addr[2]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_2v:
case NEON_ST1_2v_post:
st1(vf, vreg(reg[1]), addr[1]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_1v:
case NEON_ST1_1v_post:
st1(vf, vreg(reg[0]), addr[0]);
@@ -4533,7 +4540,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_b_post:
case NEON_LD4_b:
case NEON_LD4_b_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_b:
case NEON_ST1_b_post:
case NEON_ST2_b:
@@ -4552,7 +4560,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_h_post:
case NEON_LD4_h:
case NEON_LD4_h_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_h:
case NEON_ST1_h_post:
case NEON_ST2_h:
@@ -4572,7 +4581,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_s_post:
case NEON_LD4_s:
case NEON_LD4_s_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_s:
case NEON_ST1_s_post:
case NEON_ST2_s:
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index a8f229d764..18fa4d44ec 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -646,6 +646,7 @@ class LogicVRegister {
class Simulator : public DecoderVisitor, public SimulatorBase {
public:
static void SetRedirectInstruction(Instruction* instruction);
+ static bool ICacheMatch(void* one, void* two) { return false; }
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size) {
USE(i_cache);