path: root/deps/v8/src/arm64
author     Michaël Zasso <targos@protonmail.com>  2018-09-21 09:14:51 +0200
committer  Michaël Zasso <targos@protonmail.com>  2018-09-22 18:29:25 +0200
commit     0e7ddbd3d7e9439c67573b854c49cf82c398ae82 (patch)
tree       2afe372acde921cb57ddb3444ff00c5adef8848c /deps/v8/src/arm64
parent     13245dc50da4cb7443c39ef6c68d419d5e6336d4 (diff)
deps: update V8 to 7.0.276.20
PR-URL: https://github.com/nodejs/node/pull/22754
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h            35
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc                53
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h                 30
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc                4
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h                 40
-rw-r--r--  deps/v8/src/arm64/decoder-arm64-inl.h               10
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc               2
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc                   18
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc             24
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h              27
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc    24
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h       18
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc         244
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h           57
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc                 4
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h                  2
16 files changed, 177 insertions, 415 deletions
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 72674b87a3..52df8143ef 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -276,7 +276,7 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
shift_amount_(shift_amount) {
DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
- DCHECK(!reg.IsSP());
+ DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}
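Note: DCHECK_IMPLIES(a, b) asserts a logical implication, so the relaxed check above now tolerates an sp-based operand as long as no shift is applied to it. A minimal sketch of the equivalent plain assertion (hand-written form, not the actual macro expansion):

    // Equivalent to the DCHECK_IMPLIES above: an sp-based operand is only
    // acceptable when the shift amount is zero.
    DCHECK(!reg.IsSP() || shift_amount == 0);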
@@ -535,7 +535,7 @@ Address Assembler::target_pointer_address_at(Address pc) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- return Memory::Address_at(target_pointer_address_at(pc));
+ return Memory<Address>(target_pointer_address_at(pc));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
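Note: the Memory::Address_at(addr) -> Memory<Address>(addr) substitution recurs throughout this patch; the per-type static accessors are replaced by a single templated helper. A rough sketch of the idea (a hypothetical simplification, not V8's exact definition):

    // Typed view of a raw address: reading or writing Memory<T>(addr)
    // dereferences addr as a T.
    template <typename T>
    T& Memory(Address addr) {
      return *reinterpret_cast<T*>(addr);
    }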
@@ -549,8 +549,8 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
- return GetCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2);
+ DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
+ return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2);
}
}
@@ -570,7 +570,7 @@ Address Assembler::target_address_from_return_address(Address pc) {
// Call sequence on ARM64 is:
// ldr ip0, #... @ load from literal pool
// blr ip0
- Address candidate = pc - 2 * kInstructionSize;
+ Address candidate = pc - 2 * kInstrSize;
Instruction* instr = reinterpret_cast<Instruction*>(candidate);
USE(instr);
DCHECK(instr->IsLdrLiteralX());
@@ -598,10 +598,10 @@ void Assembler::deserialization_set_special_target_at(Address location,
target = location;
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
- Assembler::FlushICache(location, kInstructionSize);
+ Assembler::FlushICache(location, kInstrSize);
} else {
DCHECK_EQ(instr->InstructionBits(), 0);
- Memory::Address_at(location) = target;
+ Memory<Address>(location) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code. However,
// in this case, only the constant pool contents change. The instruction
@@ -612,7 +612,7 @@ void Assembler::deserialization_set_special_target_at(Address location,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
@@ -620,7 +620,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- Memory::Address_at(target_pointer_address_at(pc)) = target;
+ Memory<Address>(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code. However,
// in this case, only the constant pool contents change. The instruction
@@ -635,7 +635,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, kInstructionSize);
+ Assembler::FlushICache(pc, kInstrSize);
}
}
}
@@ -711,8 +711,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -731,7 +730,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
@@ -762,9 +761,9 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_));
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
+ Memory<Address>(pc_) = kNullAddress;
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
@@ -874,8 +873,8 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
- DCHECK(IsAligned(offset, kInstructionSize));
- return offset >> kInstructionSizeLog2;
+ DCHECK(IsAligned(offset, kInstrSize));
+ return offset >> kInstrSizeLog2;
}
@@ -1092,7 +1091,7 @@ Instr Assembler::ImmBarrierType(int imm2) {
}
unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
- DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+ DCHECK((LSSize_offset + LSSize_width) == (kInstrSize * 8));
unsigned size = static_cast<Instr>(op >> LSSize_offset);
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index af3f59bd48..d41b1a7d7f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -208,10 +208,10 @@ uint32_t RelocInfo::wasm_call_tag() const {
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
if (instr->IsLdrLiteralX()) {
return static_cast<uint32_t>(
- Memory::Address_at(Assembler::target_pointer_address_at(pc_)));
+ Memory<Address>(Assembler::target_pointer_address_at(pc_)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
+ return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
}
}
@@ -347,7 +347,7 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
first_use_ = offset;
}
- if (CanBeShared(mode)) {
+ if (RelocInfo::IsShareableRelocMode(mode)) {
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
} else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
// A zero data value is a placeholder and must not be shared.
@@ -391,7 +391,7 @@ int ConstPool::WorstCaseSize() {
// blr xzr
// nop
// All entries are 64-bit for now.
- return 4 * kInstructionSize + EntryCount() * kPointerSize;
+ return 4 * kInstrSize + EntryCount() * kPointerSize;
}
@@ -403,10 +403,10 @@ int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
// ldr xzr, #pool_size
// blr xzr
// nop ;; if not 64-bit aligned
- int prologue_size = require_jump ? kInstructionSize : 0;
- prologue_size += 2 * kInstructionSize;
- prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
- 0 : kInstructionSize;
+ int prologue_size = require_jump ? kInstrSize : 0;
+ prologue_size += 2 * kInstrSize;
+ prologue_size +=
+ IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
// All entries are 64-bit for now.
return prologue_size + EntryCount() * kPointerSize;
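Note: with kInstrSize == 4 the accounting above is straightforward; a worked example assuming require_jump is true and pc_offset() plus the prologue is not 8-byte aligned:

    // b   over_pool        -> 4 bytes (only when a jump is required)
    // ldr xzr, #pool_size  -> 4 bytes  \ the two-instruction guard
    // blr xzr              -> 4 bytes  /
    // nop                  -> 4 bytes (alignment padding)
    // prologue_size == 16; total == 16 + EntryCount() * kPointerSize.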
@@ -476,11 +476,6 @@ void ConstPool::Clear() {
}
-bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
- return RelocInfo::IsNone(mode) || RelocInfo::IsShareableRelocMode(mode);
-}
-
-
void ConstPool::EmitMarker() {
// A constant pool size is expressed in number of 32-bits words.
// Currently all entries are 64-bit.
@@ -601,8 +596,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
- UpdateCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2,
+ DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
+ UpdateCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2,
request.code_stub()->GetCode());
break;
}
@@ -959,12 +954,12 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
- return RoundUp(size, kInstructionSize) / kInstructionSize;
+ return RoundUp(size, kInstrSize) / kInstrSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
if ((instr->Mask(ExceptionMask) == HLT) &&
(instr->ImmException() == kImmExceptionIsPrintf)) {
- return kPrintfLength / kInstructionSize;
+ return kPrintfLength / kInstrSize;
}
#endif
if (IsConstantPoolAt(instr)) {
@@ -3938,7 +3933,7 @@ void Assembler::dcptr(Label* label) {
// references are not instructions so while unbound they are encoded as
// two consecutive brk instructions. The two 16-bit immediates are used
// to encode the offset.
- offset >>= kInstructionSizeLog2;
+ offset >>= kInstrSizeLog2;
DCHECK(is_int32(offset));
uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
@@ -4069,13 +4064,13 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
- DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
+ DCHECK_LE(RoundUp(len, kInstrSize), static_cast<size_t>(kGap));
EmitData(string, static_cast<int>(len));
// Pad with nullptr characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
- static_assert(sizeof(pad) == kInstructionSize,
+ static_assert(sizeof(pad) == kInstrSize,
"Size of padding must match instruction size.");
- EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
+ EmitData(pad, RoundUp(pc_offset(), kInstrSize) - pc_offset());
}
@@ -4103,6 +4098,10 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
return;
}
// Fall through if Serializer is enabled.
+#else
+ // Make sure we haven't dynamically enabled simulator code when there is no
+ // simulator built in.
+ DCHECK(!options().enable_simulator_code);
#endif
if (params & BREAK) {
@@ -4422,7 +4421,7 @@ bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
bool Assembler::IsImmLLiteral(int64_t offset) {
- int inst_size = static_cast<int>(kInstructionSizeLog2);
+ int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
DCHECK_GT(offset, 0);
@@ -4810,7 +4809,7 @@ void Assembler::near_call(HeapObjectRequest request) {
}
void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstructionSize;
+ int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
no_const_pool_before_ = pc_limit;
// Make sure the pool won't be blocked for too long.
@@ -4862,7 +4861,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Check that the code buffer is large enough before emitting the constant
// pool (this includes the gap to the relocation information).
- int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
+ int needed_space = worst_case_size + kGap + 1 * kInstrSize;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
@@ -4881,7 +4880,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
- int protection_offset = 2 * kInstructionSize;
+ int protection_offset = 2 * kInstrSize;
return pc_offset() > max_reachable_pc - margin - protection_offset -
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
@@ -5018,10 +5017,10 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
int rd_code = expected_adr->Rd();
for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
- CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
+ CHECK(InstructionAt((i + 1) * kInstrSize)->IsNop(ADR_FAR_NOP));
}
Instruction* expected_movz =
- InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
+ InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
CHECK(expected_movz->IsMovz() &&
(expected_movz->ImmMoveWide() == 0) &&
(expected_movz->ShiftMoveWide() == 0));
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index e2945d5999..b42b80f9ca 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -407,6 +407,7 @@ constexpr Register NoReg = Register::no_reg();
constexpr VRegister NoVReg = VRegister::no_reg();
constexpr CPURegister NoCPUReg = CPURegister::no_reg();
constexpr Register no_reg = NoReg;
+constexpr VRegister no_dreg = NoVReg;
#define DEFINE_REGISTER(register_class, name, ...) \
constexpr register_class name = register_class::Create<__VA_ARGS__>()
@@ -848,7 +849,6 @@ class ConstPool {
void Clear();
private:
- bool CanBeShared(RelocInfo::Mode mode);
void EmitMarker();
void EmitGuard();
void EmitEntries();
@@ -882,7 +882,7 @@ class ConstPool {
// -----------------------------------------------------------------------------
// Assembler.
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -1008,8 +1008,6 @@ class Assembler : public AssemblerBase {
static constexpr int kSpecialTargetSize = 0;
// The sizes of the call sequences emitted by MacroAssembler::Call.
- // Wherever possible, use MacroAssembler::CallSize instead of these constants,
- // as it will choose the correct value for a given relocation mode.
//
// A "near" call is encoded in a BL immediate instruction:
// bl target
@@ -1017,8 +1015,8 @@ class Assembler : public AssemblerBase {
// whereas a "far" call will be encoded like this:
// ldr temp, =target
// blr temp
- static constexpr int kNearCallSize = 1 * kInstructionSize;
- static constexpr int kFarCallSize = 2 * kInstructionSize;
+ static constexpr int kNearCallSize = 1 * kInstrSize;
+ static constexpr int kFarCallSize = 2 * kInstrSize;
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
@@ -1034,20 +1032,10 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Check the size of the code generated since the given label. This function
- // is used primarily to work around comparisons between signed and unsigned
- // quantities, since V8 uses both.
- // TODO(jbramley): Work out what sign to use for these things and if possible,
- // change things to be consistent.
- void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
- DCHECK_GE(size, 0);
- DCHECK_EQ(static_cast<uint64_t>(size), SizeOfCodeGeneratedSince(label));
- }
-
// Return the number of instructions generated from label to the
// current position.
uint64_t InstructionsGeneratedSince(const Label* label) {
- return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
  // Prevent constant pool emission until EndBlockConstPool is called.
@@ -3198,7 +3186,7 @@ class Assembler : public AssemblerBase {
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
- static constexpr int kMaxVeneerCodeSize = 1 * kInstructionSize;
+ static constexpr int kMaxVeneerCodeSize = 1 * kInstrSize;
void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range.
@@ -3423,13 +3411,13 @@ class Assembler : public AssemblerBase {
// Set how far from current pc the next constant pool check will be.
void SetNextConstPoolCheckIn(int instructions) {
- next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
+ next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
}
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
- STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ STATIC_ASSERT(sizeof(instruction) == kInstrSize);
DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
memcpy(pc_, &instruction, sizeof(instruction));
@@ -3614,7 +3602,7 @@ class PatchingAssembler : public Assembler {
// Note that the instruction cache will not be flushed.
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
- : Assembler(options, start, count * kInstructionSize + kGap) {
+ : Assembler(options, start, count * kInstrSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 7a5f06c492..328983f42c 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -215,7 +215,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// The entry hook is a Push (stp) instruction, followed by a near call.
static const unsigned int kProfileEntryHookCallSize =
- (1 * kInstructionSize) + Assembler::kNearCallSize;
+ (1 * kInstrSize) + Assembler::kNearCallSize;
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
@@ -249,7 +249,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ HardAbortScope hard_aborts(masm);
// Save all kCallerSaved registers (including lr), since this can be called
// from anywhere.
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 1b87ce572c..389f4818d5 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -28,10 +28,10 @@ namespace internal {
constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
-const unsigned kInstructionSize = 4;
-const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLoadLiteralScaleLog2 = 2;
-const unsigned kMaxLoadLiteralRange = 1 * MB;
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+constexpr size_t kLoadLiteralScaleLog2 = 2;
+constexpr size_t kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
@@ -42,7 +42,7 @@ const int kFirstCalleeSavedRegisterIndex = 19;
const int kNumberOfCalleeSavedVRegisters = 8;
const int kFirstCalleeSavedVRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
-const unsigned kJSCalleeSavedRegList = 0x03f80000;
+const size_t kJSCalleeSavedRegList = 0x03f80000;
const int kWRegSizeInBits = 32;
const int kWRegSizeInBitsLog2 = 5;
const int kWRegSize = kWRegSizeInBits >> 3;
@@ -329,36 +329,6 @@ inline Condition NegateCondition(Condition cond) {
return static_cast<Condition>(cond ^ 1);
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- case eq:
- return eq;
- default:
- // In practice this function is only used with a condition coming from
- // TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
- // invalid as it doesn't necessary make sense to reverse it (consider
- // 'mi' for instance).
- UNREACHABLE();
- }
-}
-
enum FlagsUpdate {
SetFlags = 1,
LeaveFlags = 0
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 201dfaa423..c2181ddc40 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -106,10 +106,7 @@ void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
- DCHECK((instr->Bits(27, 24) == 0x4) ||
- (instr->Bits(27, 24) == 0x5) ||
- (instr->Bits(27, 24) == 0x6) ||
- (instr->Bits(27, 24) == 0x7) );
+ DCHECK_EQ(0x4, instr->Bits(27, 24) & 0xC); // 0x4, 0x5, 0x6, 0x7
switch (instr->Bits(31, 29)) {
case 0:
@@ -203,10 +200,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
- DCHECK((instr->Bits(27, 24) == 0x8) ||
- (instr->Bits(27, 24) == 0x9) ||
- (instr->Bits(27, 24) == 0xC) ||
- (instr->Bits(27, 24) == 0xD) );
+ DCHECK_EQ(0x8, instr->Bits(27, 24) & 0xA); // 0x8, 0x9, 0xC, 0xD
if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
DecodeNEONLoadStore(instr);
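Note: both replacement DCHECKs collapse the four accepted opcode nibbles into one comparison by masking away the bits that vary. A worked example for the load/store case, where bits 27-24 may be 0x8, 0x9, 0xC or 0xD:

    // 0x8 = 0b1000, 0x9 = 0b1001, 0xC = 0b1100, 0xD = 0b1101.
    // The mask 0xA (0b1010) keeps bits 3 and 1, which are identical (1 and 0)
    // in all four values, so each of them ANDs down to 0b1000 == 0x8.
    static_assert((0x8 & 0xA) == 0x8 && (0x9 & 0xA) == 0x8 &&
                  (0xC & 0xA) == 0x8 && (0xD & 0xA) == 0x8,
                  "the accepted opcode nibbles all mask down to 0x8");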
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index b2f534ac45..cb8925f779 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -277,7 +277,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Size of an entry of the second level deopt table. Since we do not generate
// a table for ARM64, the size is zero.
-const int Deoptimizer::table_entry_size_ = 0 * kInstructionSize;
+const int Deoptimizer::table_entry_size_ = 0 * kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index d344903d59..4c7ce77e4a 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -3917,7 +3917,7 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
case 'e': offset = instr->ImmTestBranch(); break;
default: UNREACHABLE();
}
- offset <<= kInstructionSizeLog2;
+ offset <<= kInstrSizeLog2;
char sign = '+';
if (offset < 0) {
sign = '-';
@@ -4106,21 +4106,15 @@ class BufferDisassembler : public v8::internal::DisassemblingDecoder {
v8::internal::Vector<char> out_buffer_;
};
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() { USE(converter_); }
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instr) {
+ USE(converter_); // avoid unused field warning
v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
BufferDisassembler disasm(buffer);
decoder.AppendVisitor(&disasm);
decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
- return v8::internal::kInstructionSize;
+ return v8::internal::kInstrSize;
}
@@ -4129,13 +4123,13 @@ int Disassembler::ConstantPoolSizeAt(byte* instr) {
reinterpret_cast<v8::internal::Instruction*>(instr));
}
-
-void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end,
+ UnimplementedOpcodeAction) {
v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
v8::internal::PrintDisassembler disasm(file);
decoder.AppendVisitor(&disasm);
- for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+ for (byte* pc = start; pc < end; pc += v8::internal::kInstrSize) {
decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
}
}
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 4a10594590..503f31050f 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -159,7 +159,7 @@ double Instruction::ImmNEONFP64() const {
unsigned CalcLSDataSize(LoadStoreOp op) {
DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
- kInstructionSize * 8);
+ kInstrSize * 8);
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
@@ -197,16 +197,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
- offset = ImmBranch() << kInstructionSizeLog2;
+ offset = ImmBranch() << kInstrSizeLog2;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
- offset = ImmUnresolvedInternalReference() << kInstructionSizeLog2;
+ offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
- offset = ImmLLiteral() << kInstructionSizeLog2;
+ offset = ImmLLiteral() << kInstrSizeLog2;
}
return offset;
}
@@ -260,10 +260,10 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
void Instruction::SetBranchImmTarget(Instruction* target) {
- DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
- DCHECK(IsValidImmPCOffset(BranchType(),
- DistanceTo(target) >> kInstructionSizeLog2));
- int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
+ DCHECK(IsAligned(DistanceTo(target), kInstrSize));
+ DCHECK(
+ IsValidImmPCOffset(BranchType(), DistanceTo(target) >> kInstrSizeLog2));
+ int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
switch (BranchType()) {
@@ -295,10 +295,10 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(
const AssemblerOptions& options, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
- DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
- DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
+ DCHECK(IsAligned(DistanceTo(target), kInstrSize));
+ DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
int32_t target_offset =
- static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
+ static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@@ -310,7 +310,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
- DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
+ DCHECK(IsAligned(DistanceTo(source), kInstrSize));
DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index bb1791becb..9ea15e55ad 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -104,11 +104,11 @@ class Instruction {
}
V8_INLINE const Instruction* following(int count = 1) const {
- return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ return InstructionAtOffset(count * static_cast<int>(kInstrSize));
}
V8_INLINE Instruction* following(int count = 1) {
- return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ return InstructionAtOffset(count * static_cast<int>(kInstrSize));
}
V8_INLINE const Instruction* preceding(int count = 1) const {
@@ -329,9 +329,8 @@ class Instruction {
// The range of the branch instruction, expressed as 'instr +- range'.
static int32_t ImmBranchRange(ImmBranchType branch_type) {
- return
- (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
- kInstructionSize;
+ return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
+ kInstrSize;
}
int ImmBranch() const {
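Note: the reformatted expression computes the branch range as half of the encodable span, scaled by the instruction size, minus one instruction. Worked numbers assuming the 19-bit immediate of a conditional branch (the bit width is an assumption taken from the AArch64 encoding, not from this diff):

    // (1 << (19 + kInstrSizeLog2)) / 2 - kInstrSize
    //   == (1 << 21) / 2 - 4
    //   == 1048576 - 4
    //   == 1048572 bytes, i.e. roughly 'instr +- 1 MB'.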
@@ -419,14 +418,14 @@ class Instruction {
V8_INLINE const Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
// The FUZZ_disasm test relies on no check being done.
- DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
+ DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
return this + offset;
}
V8_INLINE Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
// The FUZZ_disasm test relies on no check being done.
- DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
+ DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
return this + offset;
}
@@ -534,9 +533,9 @@ const Instr kImmExceptionIsPrintf = 0xdeb1;
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
-const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
-const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
-const unsigned kPrintfLength = 3 * kInstructionSize;
+const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
+const unsigned kPrintfLength = 3 * kInstrSize;
const unsigned kPrintfMaxArgCount = 4;
@@ -557,12 +556,12 @@ const Instr kImmExceptionIsDebug = 0xdeb0;
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a nullptr-terminated ASCII string, padded to
-// kInstructionSize so that subsequent instructions are correctly aligned.
+// kInstrSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
-const unsigned kDebugCodeOffset = 1 * kInstructionSize;
-const unsigned kDebugParamsOffset = 2 * kInstructionSize;
-const unsigned kDebugMessageOffset = 3 * kInstructionSize;
+const unsigned kDebugCodeOffset = 1 * kInstrSize;
+const unsigned kDebugParamsOffset = 2 * kInstrSize;
+const unsigned kDebugMessageOffset = 3 * kInstrSize;
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 357161d57f..bb1c22aff5 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -250,30 +250,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // argument count (argc)
- x11, // address of first argument (argv)
- x1 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index b583d7ba14..62594241ec 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -780,18 +780,6 @@ void TurboAssembler::Mneg(const Register& rd, const Register& rn,
mneg(rd, rn, rm);
}
-void TurboAssembler::Mov(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions());
- DCHECK(!rd.IsZero());
- // Emit a register move only if the registers are distinct, or if they are
- // not X registers. Note that mov(w0, w0) is not a no-op because it clears
- // the top word of x0.
- if (!rd.Is(rn) || !rd.Is64Bits()) {
- Assembler::mov(rd, rn);
- }
-}
-
-
void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -830,6 +818,12 @@ void TurboAssembler::Rbit(const Register& rd, const Register& rn) {
rbit(rd, rn);
}
+void TurboAssembler::Rev(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
+ DCHECK(!rd.IsZero());
+ rev(rd, rn);
+}
+
void TurboAssembler::Ret(const Register& xn) {
DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 74583523af..b15ab47473 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -20,6 +20,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
@@ -305,23 +306,35 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
if (operand.NeedsRelocation(this)) {
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
+ Address addr = static_cast<Address>(operand.ImmediateValue());
+ ExternalReference reference = bit_cast<ExternalReference>(addr);
+ IndirectLoadExternalReference(rd, reference);
+ return;
+ } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
+ Handle<HeapObject> x(
+ reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
+ IndirectLoadConstant(rd, x);
+ return;
+ }
+ }
+ }
Ldr(dst, operand);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
Mov(dst, operand.ImmediateValue());
-
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Emit a shift instruction if moving a shifted register. This operation
// could also be achieved using an orr instruction (like orn used by Mvn),
// but using a shift instruction makes the disassembly clearer.
EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
-
} else if (operand.IsExtendedRegister()) {
// Emit an extend instruction if moving an extended register. This handles
// extend with post-shift operations, too.
EmitExtendShift(dst, operand.reg(), operand.extend(),
operand.shift_amount());
-
} else {
// Otherwise, emit a register move only if the registers are distinct, or
// if they are not X registers.
@@ -347,16 +360,6 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
}
-void TurboAssembler::Mov(const Register& rd, ExternalReference reference) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(rd, reference);
- return;
- }
- }
- Mov(rd, Operand(reference));
-}
-
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xFF);
@@ -1049,8 +1052,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
- PushPreamble(count, size);
PushHelper(count, size, src0, src1, src2, src3);
}
@@ -1062,8 +1065,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
int size = src0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
- PushPreamble(count, size);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
@@ -1078,9 +1081,9 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
PopHelper(count, size, dst0, dst1, dst2, dst3);
- PopPostamble(count, size);
}
void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
@@ -1095,31 +1098,26 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
PopHelper(4, size, dst0, dst1, dst2, dst3);
PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
- PopPostamble(count, size);
}
void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
+ DCHECK_EQ(0, size % 16);
- PushPreamble(size);
// Reserve room for src0 and push src1.
str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
-
-void MacroAssembler::PushPopQueue::PushQueued(
- PreambleDirective preamble_directive) {
+void MacroAssembler::PushPopQueue::PushQueued() {
+ DCHECK_EQ(0, size_ % 16);
if (queued_.empty()) return;
- if (preamble_directive == WITH_PREAMBLE) {
- masm_->PushPreamble(size_);
- }
-
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
@@ -1141,6 +1139,7 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
+ DCHECK_EQ(0, size_ % 16);
if (queued_.empty()) return;
size_t count = queued_.size();
@@ -1159,14 +1158,13 @@ void MacroAssembler::PushPopQueue::PopQueued() {
batch[0], batch[1], batch[2], batch[3]);
}
- masm_->PopPostamble(size_);
queued_.clear();
}
void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
- PushPreamble(registers.Count(), size);
// Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
@@ -1181,6 +1179,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
// Pop up to four registers at a time.
while (!registers.IsEmpty()) {
@@ -1192,12 +1191,9 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
int count = count_before - registers.Count();
PopHelper(count, size, dst0, dst1, dst2, dst3);
}
- PopPostamble(registers.Count(), size);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
- PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
@@ -1316,39 +1312,6 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
}
}
-void TurboAssembler::PushPreamble(Operand total_size) {
- if (total_size.IsZero()) return;
-
- // The stack pointer must be aligned to 16 bytes on entry, and the total
- // size of the specified registers must also be a multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
-}
-
-void TurboAssembler::PopPostamble(Operand total_size) {
- if (total_size.IsZero()) return;
-
- // The stack pointer must be aligned to 16 bytes on entry, and the total
- // size of the specified registers must also be a multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
-}
-
-void TurboAssembler::PushPreamble(int count, int size) {
- PushPreamble(count * size);
-}
-void TurboAssembler::PopPostamble(int count, int size) {
- PopPostamble(count * size);
-}
-
void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
@@ -1429,7 +1392,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
void TurboAssembler::AssertSpAligned() {
if (emit_debug_code()) {
- TrapOnAbortScope trap_on_abort_scope(this); // Avoid calls to Abort.
+ HardAbortScope hard_abort(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
@@ -1563,24 +1526,12 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
- Move(result, Handle<HeapObject>::cast(object));
+ Mov(result, Handle<HeapObject>::cast(object));
} else {
Mov(result, Operand(Smi::cast(*object)));
}
}
-void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
-
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(dst, value);
- return;
- }
- }
- Mov(dst, value);
-}
-
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void TurboAssembler::Swap(Register lhs, Register rhs) {
@@ -1717,14 +1668,12 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
BlockPoolsScope scope(this);
#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
+ Label start;
+ Bind(&start);
#endif
Operand operand = Operand::EmbeddedCode(stub);
near_call(operand.heap_object_request());
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize);
-#endif
+ DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallStub(CodeStub* stub) {
@@ -1871,9 +1820,9 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
- Move(destination, kRootRegister);
+ Mov(destination, kRootRegister);
} else {
- Add(destination, kRootRegister, Operand(offset));
+ Add(destination, kRootRegister, offset);
}
}
@@ -1896,7 +1845,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstructionSize;
+ uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstrSize;
Mov(temp, Immediate(imm, rmode));
Br(temp);
}
@@ -1916,8 +1865,8 @@ static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
// address at this point, and needs to be encoded as-is.
if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
offset -= reinterpret_cast<int64_t>(pc);
- DCHECK_EQ(offset % kInstructionSize, 0);
- offset = offset / static_cast<int>(kInstructionSize);
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
}
return offset;
}
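Note: near calls and jumps encode their target as an instruction-count offset in the branch immediate, which is why the byte offset is asserted to be a multiple of kInstrSize and then divided by it. Illustrative numbers only:

    // A target 64 bytes ahead of pc: 64 % kInstrSize == 0,
    // encoded immediate == 64 / kInstrSize == 16 instructions.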
@@ -1950,6 +1899,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -1970,26 +1920,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Register target) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
-
Blr(target);
-
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
-#endif
}
-// TurboAssembler::CallSize is sensitive to changes in this function, as it
-// requires to know how many instructions are used to branch to the target.
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
@@ -1998,17 +1933,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
} else {
IndirectCall(target, rmode);
}
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
-#endif
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
@@ -2029,6 +1957,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2045,19 +1974,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
} else {
IndirectCall(code.address(), rmode);
}
-
-#ifdef DEBUG
- // Check the size of the code generated.
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode));
-#endif
}
void TurboAssembler::Call(ExternalReference target) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- // Immediate is in charge of setting the relocation mode to
- // EXTERNAL_REFERENCE.
- Mov(temp, Immediate(target));
+ Mov(temp, target);
Call(temp);
}
@@ -2078,8 +2000,8 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
BlockPoolsScope scope(this);
#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
+ Label start;
+ Bind(&start);
#endif
// The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
@@ -2091,29 +2013,12 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
movz(temp, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
- DCHECK_EQ(offset % kInstructionSize, 0);
- offset = offset / static_cast<int>(kInstructionSize);
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize);
-#endif
-}
-
-int TurboAssembler::CallSize(Register target) {
- USE(target);
- return kInstructionSize;
-}
-
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
- USE(target);
- return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- USE(code);
- return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
+ DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2536,7 +2441,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
- Move(scratch, CodeObject());
+ Mov(scratch, CodeObject());
Push(scratch, padreg);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
@@ -2810,7 +2715,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::CheckPageFlag(const Register& object,
const Register& scratch, int mask,
Condition cc, Label* condition_met) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
@@ -2822,7 +2727,7 @@ void MacroAssembler::CheckPageFlag(const Register& object,
void TurboAssembler::CheckPageFlagSet(const Register& object,
const Register& scratch, int mask,
Label* if_any_set) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAnySet(scratch, mask, if_any_set);
}
@@ -2830,7 +2735,7 @@ void TurboAssembler::CheckPageFlagSet(const Register& object,
void TurboAssembler::CheckPageFlagClear(const Register& object,
const Register& scratch, int mask,
Label* if_all_clear) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}
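Note: the three hunks above only swap Page::kPageAlignmentMask for the namespace-level kPageAlignmentMask; the underlying trick is unchanged: clearing the low alignment bits of an object address yields the start of its MemoryChunk, where the flags word lives. A hedged sketch assuming 512 KB pages (the page size is an assumption, not something stated in this diff):

    // kPageAlignmentMask == 0x7FFFF for 512 KB pages, so
    //   chunk_start = object_address & ~uintptr_t{0x7FFFF};
    // e.g. an object at 0x350000123456 lies in the chunk starting at
    // 0x350000100000, whose MemoryChunk::kFlagsOffset word is then loaded.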
@@ -2930,8 +2835,8 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter, object_parameter);
Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreRegisters(registers);
@@ -3044,38 +2949,26 @@ void TurboAssembler::Abort(AbortReason reason) {
RegList old_tmp_list = TmpList()->list();
TmpList()->Combine(MacroAssembler::DefaultTmpList());
- if (use_real_aborts()) {
- // Avoid infinite recursion; Push contains some assertions that use Abort.
- NoUseRealAbortsScope no_real_aborts(this);
-
- Move(x1, Smi::FromInt(static_cast<int>(reason)));
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- } else {
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- }
- } else {
- // Load the string to pass to Printf.
- Label msg_address;
- Adr(x0, &msg_address);
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ Mov(w0, static_cast<int>(reason));
+ Call(ExternalReference::abort_with_reason());
+ return;
+ }
- // Call Printf directly to report the error.
- CallPrintf();
+ // Avoid infinite recursion; Push contains some assertions that use Abort.
+ HardAbortScope hard_aborts(this);
- // We need a way to stop execution on both the simulator and real hardware,
- // and Unreachable() is the best option.
- Unreachable();
+ Mov(x1, Smi::FromInt(static_cast<int>(reason)));
- // Emit the message string directly in the instruction stream.
- {
- BlockPoolsScope scope(this);
- Bind(&msg_address);
- EmitStringData(GetAbortReason(reason));
- }
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
TmpList()->set_list(old_tmp_list);
@@ -3216,7 +3109,8 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
- { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ {
+ InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
hlt(kImmExceptionIsPrintf);
dc32(arg_count); // kPrintfArgCountOffset
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index a73fc2f47b..a2862748a6 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -63,6 +63,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = x2;
constexpr Register kOffHeapTrampolineRegister = ip0;
constexpr Register kRuntimeCallFunctionRegister = x1;
constexpr Register kRuntimeCallArgCountRegister = x0;
+constexpr Register kRuntimeCallArgvRegister = x11;
constexpr Register kWasmInstanceRegister = x7;
#define LS_MACRO_LIST(V) \
@@ -177,7 +178,7 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -185,27 +186,6 @@ class TurboAssembler : public TurboAssemblerBase {
: TurboAssemblerBase(isolate, options, buffer, buffer_size,
create_code_object) {}
- // The Abort method should call a V8 runtime function, but the CallRuntime
- // mechanism depends on CEntry. If use_real_aborts is false, Abort will
- // use a simpler abort mechanism that doesn't depend on CEntry.
- //
- // The purpose of this is to allow Aborts to be compiled whilst CEntry is
- // being generated.
- bool use_real_aborts() const { return use_real_aborts_; }
-
- class NoUseRealAbortsScope {
- public:
- explicit NoUseRealAbortsScope(TurboAssembler* tasm)
- : saved_(tasm->use_real_aborts_), tasm_(tasm) {
- tasm_->use_real_aborts_ = false;
- }
- ~NoUseRealAbortsScope() { tasm_->use_real_aborts_ = saved_; }
-
- private:
- bool saved_;
- TurboAssembler* tasm_;
- };
-
#if DEBUG
void set_allow_macro_instructions(bool value) {
allow_macro_instructions_ = value;
@@ -233,9 +213,7 @@ class TurboAssembler : public TurboAssemblerBase {
void Mov(const Register& rd, const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
- void Mov(const Register& rd, ExternalReference reference);
void Mov(const Register& rd, uint64_t imm);
- inline void Mov(const Register& rd, const Register& rm);
void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
int vn_index) {
DCHECK(allow_macro_instructions());
@@ -256,8 +234,6 @@ class TurboAssembler : public TurboAssemblerBase {
// This is required for compatibility with architecture independent code.
// Remove if not needed.
- void Move(Register dst, Register src);
- void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Smi* src);
// Register swap. Note that the register operands should be distinct.
@@ -833,15 +809,6 @@ class TurboAssembler : public TurboAssemblerBase {
void CheckPageFlagClear(const Register& object, const Register& scratch,
int mask, Label* if_all_clear);
- // Perform necessary maintenance operations before a push or after a pop.
- //
- // Note that size is specified in bytes.
- void PushPreamble(Operand total_size);
- void PopPostamble(Operand total_size);
-
- void PushPreamble(int count, int size);
- void PopPostamble(int count, int size);
-
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
inline void TestAndBranchIfAnySet(const Register& reg,
@@ -900,13 +867,6 @@ class TurboAssembler : public TurboAssemblerBase {
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode);
- // For every Call variant, there is a matching CallSize function that returns
- // the size (in bytes) of the call sequence.
- static int CallSize(Register target);
- int CallSize(Address target, RelocInfo::Mode rmode);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
-
// Calls a C function.
// The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
@@ -979,6 +939,7 @@ class TurboAssembler : public TurboAssemblerBase {
inline void Fmin(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Rbit(const Register& rd, const Register& rn);
+ inline void Rev(const Register& rd, const Register& rn);
enum AdrHint {
// The target must be within the immediate range of adr.
@@ -1274,8 +1235,6 @@ class TurboAssembler : public TurboAssemblerBase {
CPURegList tmp_list_ = DefaultTmpList();
CPURegList fptmp_list_ = DefaultFPTmpList();
- bool use_real_aborts_ = true;
-
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
// be able to emit a veneer for this branch if necessary.
@@ -1609,7 +1568,7 @@ class MacroAssembler : public TurboAssembler {
// register sizes and types.
class PushPopQueue {
public:
- explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
+ explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) {}
~PushPopQueue() {
DCHECK(queued_.empty());
@@ -1620,11 +1579,7 @@ class MacroAssembler : public TurboAssembler {
queued_.push_back(rt);
}
- enum PreambleDirective {
- WITH_PREAMBLE,
- SKIP_PREAMBLE
- };
- void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
+ void PushQueued();
void PopQueued();
private:
@@ -2076,7 +2031,7 @@ class InstructionAccurateScope BASE_EMBEDDED {
: tasm_(tasm)
#ifdef DEBUG
,
- size_(count * kInstructionSize)
+ size_(count * kInstrSize)
#endif
{
// Before blocking the const pool, see if it needs to be emitted.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 09c447fdb5..5df4361c1b 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -1081,7 +1081,7 @@ void Simulator::CheckBreakNext() {
void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
- Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ Instruction* end = start->InstructionAtOffset(count * kInstrSize);
for (Instruction* pc = start; pc < end; pc = pc->following()) {
disassembler_decoder_->Decode(pc);
}
@@ -3415,7 +3415,7 @@ void Simulator::VisitException(Instruction* instr) {
// The stop parameters are inlined in the code. Skip them:
// - Skip to the end of the message string.
size_t size = kDebugMessageOffset + strlen(message) + 1;
- pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
+ pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstrSize));
// - Verify that the unreachable marker is present.
DCHECK(pc_->Mask(ExceptionMask) == HLT);
DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 4bd9294c2f..c97a759d1b 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -774,7 +774,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
void ExecuteInstruction() {
- DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstrSize));
CheckBreakNext();
Decode(pc_);
increment_pc();