author    Michaël Zasso <targos@protonmail.com>  2017-06-06 10:28:14 +0200
committer Michaël Zasso <targos@protonmail.com>  2017-06-07 10:33:31 +0200
commit    3dc8c3bed4cf3a77607edbb0b015e33f8b60fc09 (patch)
tree      9dee56e142638b34f1eccbd0ad88c3bce5377c29 /deps/v8/src/arm64
parent    91a1bbe3055a660194ca4d403795aa0c03e9d056 (diff)
deps: update V8 to 5.9.211.32
PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h          |  55
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc             |  63
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h              | 182
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc            | 487
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.h             |  40
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.cc               |   2
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h              |  11
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc           |  53
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc                |  11
-rw-r--r--  deps/v8/src/arm64/eh-frame-arm64.cc              |   1
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc          |  21
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h           |  10
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc            |  60
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc |  56
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h    |  27
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc       | 182
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h        |  93
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc             | 314
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h              |  91
19 files changed, 808 insertions, 951 deletions
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 5242387a42..f6bb6a8893 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -16,7 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
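The first hunk renames the SIMD capability probe to make clear it refers to WebAssembly SIMD. A minimal sketch of how such a predicate is typically consulted; the call site is illustrative, not taken from this patch, and on arm64 the predicate still reports false:

    // Illustrative only: gate 128-bit WebAssembly SIMD code generation on the
    // renamed predicate.
    if (CpuFeatures::SupportsWasmSimd128()) {
      // emit native 128-bit SIMD instructions
    } else {
      // fall back to a scalar lowering of the SIMD operations
    }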
@@ -691,32 +691,28 @@ Address RelocInfo::constant_pool_entry_address() {
return Assembler::target_pointer_address_at(pc_);
}
-
-Object* RelocInfo::target_object() {
+HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+ return HeapObject::cast(
+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
}
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(
+ reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
}
-
-void RelocInfo::set_target_object(Object* target,
+void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(isolate_, pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target));
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -745,13 +741,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-
-void RelocInfo::set_target_runtime_entry(Address target,
+void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(target, write_barrier_mode, icache_flush_mode);
+ set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
}
@@ -776,13 +771,11 @@ void RelocInfo::set_target_cell(Cell* cell,
}
-static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
-
-Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on ARM64.
- return Handle<Object>();
+ return Handle<Code>();
}
@@ -813,27 +806,25 @@ Address RelocInfo::debug_call_address() {
return Assembler::target_address_at(pc_, host_);
}
-
-void RelocInfo::set_debug_call_address(Address target) {
+void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
- Assembler::set_target_address_at(isolate_, pc_, host_, target);
+ Assembler::set_target_address_at(isolate, pc_, host_, target);
if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
+ Code* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ target_code);
}
}
-
-void RelocInfo::WipeOut() {
+void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
- Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, NULL);
}
}
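The RelocInfo changes above follow one theme: the isolate is no longer read from a stored isolate_ field but passed in by the caller (set_target_runtime_entry, set_debug_call_address, WipeOut), and object targets are typed as HeapObject*/Code* rather than the generic Object*. A hedged sketch of a call site under the new signatures; the surrounding helper is illustrative, not part of this patch:

    // Sketch, assuming the caller already holds an Isolate*.
    void WipeOutRelocations(Isolate* isolate, std::vector<RelocInfo*>& infos) {
      for (RelocInfo* rinfo : infos) {
        rinfo->WipeOut(isolate);  // was rinfo->WipeOut() before this patch
      }
    }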
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 3002d7c250..ac6931dec7 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -28,7 +28,6 @@
#if V8_TARGET_ARCH_ARM64
-#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
@@ -200,13 +199,14 @@ uint32_t RelocInfo::wasm_function_table_size_reference() {
}
void RelocInfo::unchecked_update_wasm_memory_reference(
- Address address, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+ Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
+ // No icache flushing needed, see comment in set_target_address_at.
}
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
@@ -528,7 +528,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
}
assm_->dc64(data);
}
@@ -544,7 +544,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
assm_->dc64(unique_it->first);
}
unique_entries_.clear();
@@ -553,8 +553,8 @@ void ConstPool::EmitEntries() {
// Assembler
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
+Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
+ : AssemblerBase(isolate_data, buffer, buffer_size),
constpool_(this),
recorded_ast_id_(TypeFeedbackId::None()),
unresolved_branches_() {
@@ -675,22 +675,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
- prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
- prev_link->SetImmPCOffsetTarget(isolate(), next_link);
+ prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
} else if (label_veneer != NULL) {
// Use the veneer for all previous links in the chain.
- prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
- link->SetImmPCOffsetTarget(isolate(), label_veneer);
+ link->SetImmPCOffsetTarget(isolate_data(), label_veneer);
link = next_link;
}
} else {
@@ -761,10 +761,11 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
- PatchingAssembler patcher(isolate(), link, 2);
+ PatchingAssembler patcher(isolate_data(), reinterpret_cast<byte*>(link),
+ 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
- link->SetImmPCOffsetTarget(isolate(),
+ link->SetImmPCOffsetTarget(isolate_data(),
reinterpret_cast<Instruction*>(pc_));
}
@@ -1697,19 +1698,19 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
void Assembler::ldar(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
- Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldaxr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
- Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
- Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlxr(const Register& rs, const Register& rt,
@@ -1717,25 +1718,25 @@ void Assembler::stlxr(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
- Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldarb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldaxrb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlrb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlxrb(const Register& rs, const Register& rt,
@@ -1743,25 +1744,25 @@ void Assembler::stlxrb(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldarh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldaxrh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlrh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlxrh(const Register& rs, const Register& rt,
@@ -1769,7 +1770,7 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
- Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+ Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::mov(const Register& rd, const Register& rm) {
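Every load-acquire/store-release emitter above switches the base-register field from Rn() to RnSP(). The likely intent (an assumption; the patch does not say) is that the base field of these instructions must accept the stack pointer, which Rn() does not allow. A comment-level sketch of the distinction:

    // Sketch only; the exact behaviour of Rn() vs RnSP() is an assumption here.
    //   Rn(rn)   -- base-register field helper that rejects csp;
    //   RnSP(rn) -- same field, but the stack pointer is a legal base.
    // With the change, for example
    //   __ ldar(x0, csp);
    // can encode the stack pointer as the base register of LDAR.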
@@ -2948,7 +2949,7 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
@@ -2978,8 +2979,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
- rmode, RecordedAstId().ToInt(), NULL);
+ RelocInfo reloc_info_with_ast_id(reinterpret_cast<byte*>(pc_), rmode,
+ RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -3068,7 +3069,7 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
- RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
+ RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL,
static_cast<intptr_t>(size), NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -3111,7 +3112,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(isolate(), veneer);
+ branch->SetImmPCOffsetTarget(isolate_data(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 460ac44d7a..ea1d94f628 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -10,6 +10,7 @@
#include <map>
#include <vector>
+#include "src/arm64/constants-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
@@ -63,8 +64,8 @@ namespace internal {
R(d25) R(d26) R(d27) R(d28)
// clang-format on
-static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-
+constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
@@ -90,6 +91,11 @@ struct CPURegister {
kNoRegister
};
+ constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {}
+
+ constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type)
+ : reg_code(reg_code), reg_size(reg_size), reg_type(reg_type) {}
+
static CPURegister Create(int code, int size, RegisterType type) {
CPURegister r = {code, size, type};
return r;
@@ -138,25 +144,9 @@ struct Register : public CPURegister {
return Register(CPURegister::Create(code, size, CPURegister::kRegister));
}
- Register() {
- reg_code = 0;
- reg_size = 0;
- reg_type = CPURegister::kNoRegister;
- }
-
- explicit Register(const CPURegister& r) {
- reg_code = r.reg_code;
- reg_size = r.reg_size;
- reg_type = r.reg_type;
- DCHECK(IsValidOrNone());
- }
+ constexpr Register() : CPURegister() {}
- Register(const Register& r) { // NOLINT(runtime/explicit)
- reg_code = r.reg_code;
- reg_size = r.reg_size;
- reg_type = r.reg_type;
- DCHECK(IsValidOrNone());
- }
+ constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
@@ -170,7 +160,7 @@ struct Register : public CPURegister {
// These memebers are necessary for compilation.
// A few of them may be unused for now.
- static const int kNumRegisters = kNumberOfRegisters;
+ static constexpr int kNumRegisters = kNumberOfRegisters;
STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
static int NumRegisters() { return kNumRegisters; }
@@ -197,8 +187,8 @@ struct Register : public CPURegister {
// End of V8 compatibility section -----------------------
};
-static const bool kSimpleFPAliasing = true;
-static const bool kSimdMaskRegisters = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
struct FPRegister : public CPURegister {
enum Code {
@@ -214,25 +204,9 @@ struct FPRegister : public CPURegister {
CPURegister::Create(code, size, CPURegister::kFPRegister));
}
- FPRegister() {
- reg_code = 0;
- reg_size = 0;
- reg_type = CPURegister::kNoRegister;
- }
+ constexpr FPRegister() : CPURegister() {}
- explicit FPRegister(const CPURegister& r) {
- reg_code = r.reg_code;
- reg_size = r.reg_size;
- reg_type = r.reg_type;
- DCHECK(IsValidOrNone());
- }
-
- FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
- reg_code = r.reg_code;
- reg_size = r.reg_size;
- reg_type = r.reg_type;
- DCHECK(IsValidOrNone());
- }
+ constexpr explicit FPRegister(const CPURegister& r) : CPURegister(r) {}
bool IsValid() const {
DCHECK(IsFPRegister() || IsNone());
@@ -243,7 +217,7 @@ struct FPRegister : public CPURegister {
static FPRegister DRegFromCode(unsigned code);
// Start of V8 compatibility section ---------------------
- static const int kMaxNumRegisters = kNumberOfFPRegisters;
+ static constexpr int kMaxNumRegisters = kNumberOfFPRegisters;
STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
// Crankshaft can use all the FP registers except:
@@ -261,54 +235,41 @@ struct FPRegister : public CPURegister {
STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
-
-#if defined(ARM64_DEFINE_REG_STATICS)
-#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
- const CPURegister init_##register_class##_##name = {code, size, type}; \
- const register_class& name = *reinterpret_cast<const register_class*>( \
- &init_##register_class##_##name)
-#define ALIAS_REGISTER(register_class, alias, name) \
- const register_class& alias = *reinterpret_cast<const register_class*>( \
- &init_##register_class##_##name)
-#else
-#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
- extern const register_class& name
+#define DEFINE_REGISTER(register_class, name, code, size, type) \
+ constexpr register_class name { CPURegister(code, size, type) }
#define ALIAS_REGISTER(register_class, alias, name) \
- extern const register_class& alias
-#endif // defined(ARM64_DEFINE_REG_STATICS)
+ constexpr register_class alias = name
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
-INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
-INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
-INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
// v8 compatibility.
-INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
-#define DEFINE_REGISTERS(N) \
- INITIALIZE_REGISTER(Register, w##N, N, \
- kWRegSizeInBits, CPURegister::kRegister); \
- INITIALIZE_REGISTER(Register, x##N, N, \
- kXRegSizeInBits, CPURegister::kRegister);
+#define DEFINE_REGISTERS(N) \
+ DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits, CPURegister::kRegister); \
+ DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits, CPURegister::kRegister);
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
-INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
- CPURegister::kRegister);
-INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
- CPURegister::kRegister);
+DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
+ CPURegister::kRegister);
+DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
+ CPURegister::kRegister);
-#define DEFINE_FPREGISTERS(N) \
- INITIALIZE_REGISTER(FPRegister, s##N, N, \
- kSRegSizeInBits, CPURegister::kFPRegister); \
- INITIALIZE_REGISTER(FPRegister, d##N, N, \
- kDRegSizeInBits, CPURegister::kFPRegister);
+#define DEFINE_FPREGISTERS(N) \
+ DEFINE_REGISTER(FPRegister, s##N, N, kSRegSizeInBits, \
+ CPURegister::kFPRegister); \
+ DEFINE_REGISTER(FPRegister, d##N, N, kDRegSizeInBits, \
+ CPURegister::kFPRegister);
GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS
-#undef INITIALIZE_REGISTER
+#undef DEFINE_REGISTER
// Registers aliases.
ALIAS_REGISTER(Register, ip0, x16);
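With CPURegister gaining constexpr constructors, the old INITIALIZE_REGISTER scheme (a static initializer object plus an extern reference per register) collapses into plain constexpr definitions. A rough sketch of what one expansion now yields, substituting directly into the macro bodies above:

    // DEFINE_REGISTER(Register, x0, 0, kXRegSizeInBits, CPURegister::kRegister);
    // expands (roughly) to:
    constexpr Register x0 { CPURegister(0, kXRegSizeInBits, CPURegister::kRegister) };
    // and ALIAS_REGISTER(Register, ip0, x16); becomes:
    constexpr Register ip0 = x16;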
@@ -566,8 +527,8 @@ class Immediate {
// -----------------------------------------------------------------------------
// Operands.
-const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
// Represents an operand in a machine instruction.
class Operand {
@@ -756,7 +717,9 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : Assembler(IsolateData(isolate), buffer, buffer_size) {}
+ Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~Assembler();
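The Assembler constructor is split the same way: the Isolate* form is now a thin wrapper that delegates to a new IsolateData-based constructor, so the core assembler no longer requires a live Isolate. A hedged sketch of the two entry points; the stated motivation is an assumption, and buffer/buffer_size are illustrative:

    // Both forms construct the same assembler; the IsolateData form can be
    // used where only serialized isolate data is available (assumed motivation,
    // not stated in the patch).
    Assembler masm_a(isolate, buffer, buffer_size);               // convenience wrapper
    Assembler masm_b(IsolateData(isolate), buffer, buffer_size);  // delegated-to form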
@@ -807,6 +770,7 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc.
+ // The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
@@ -836,7 +800,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
- static const int kSpecialTargetSize = kPointerSize;
+ static constexpr int kSpecialTargetSize = kPointerSize;
// The sizes of the call sequences emitted by MacroAssembler::Call.
// Wherever possible, use MacroAssembler::CallSize instead of these constants,
@@ -851,8 +815,8 @@ class Assembler : public AssemblerBase {
// With relocation:
// ldr temp, =target
// blr temp
- static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
- static const int kCallSizeWithRelocation = 2 * kInstructionSize;
+ static constexpr int kCallSizeWithoutRelocation = 4 * kInstructionSize;
+ static constexpr int kCallSizeWithRelocation = 2 * kInstructionSize;
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
@@ -884,12 +848,12 @@ class Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
- static const int kPatchDebugBreakSlotAddressOffset = 0;
+ static constexpr int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
- static const int kDebugBreakSlotInstructions = 5;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstructionSize;
+ static constexpr int kDebugBreakSlotInstructions = 5;
+ static constexpr int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstructionSize;
// Prevent contant pool emission until EndBlockConstPool is called.
// Call to this function can be nested but must be followed by an equal
@@ -1847,7 +1811,7 @@ class Assembler : public AssemblerBase {
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
- static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+ static constexpr int kMaxVeneerCodeSize = 1 * kInstructionSize;
void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range.
@@ -2000,7 +1964,7 @@ class Assembler : public AssemblerBase {
// suitable for fields that take instruction offsets.
inline int LinkAndGetInstructionOffsetTo(Label* label);
- static const int kStartOfLabelLinkChain = 0;
+ static constexpr int kStartOfLabelLinkChain = 0;
// Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const * label);
@@ -2061,17 +2025,17 @@ class Assembler : public AssemblerBase {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstPoolInterval = 128;
+ static constexpr int kCheckConstPoolInterval = 128;
// Distance to first use after a which a pool will be emitted. Pool entries
// are accessed with pc relative load therefore this cannot be more than
// 1 * MB. Since constant pool emission checks are interval based this value
// is an approximation.
- static const int kApproxMaxDistToConstPool = 64 * KB;
+ static constexpr int kApproxMaxDistToConstPool = 64 * KB;
// Number of pool entries after which a pool will be emitted. Since constant
// pool emission checks are interval based this value is an approximation.
- static const int kApproxMaxPoolEntryCount = 512;
+ static constexpr int kApproxMaxPoolEntryCount = 512;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -2082,8 +2046,9 @@ class Assembler : public AssemblerBase {
// Relocation info generation
// Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+
// Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels
// are already bound.
@@ -2121,7 +2086,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries, and debug strings encoded in the instruction
// stream.
- static const int kGap = 128;
+ static constexpr int kGap = 128;
public:
class FarBranchInfo {
@@ -2151,13 +2116,13 @@ class Assembler : public AssemblerBase {
// We generate a veneer for a branch if we reach within this distance of the
// limit of the range.
- static const int kVeneerDistanceMargin = 1 * KB;
+ static constexpr int kVeneerDistanceMargin = 1 * KB;
// The factor of 2 is a finger in the air guess. With a default margin of
// 1KB, that leaves us an addional 256 instructions to avoid generating a
// protective branch.
- static const int kVeneerNoProtectionFactor = 2;
- static const int kVeneerDistanceCheckMargin =
- kVeneerNoProtectionFactor * kVeneerDistanceMargin;
+ static constexpr int kVeneerNoProtectionFactor = 2;
+ static constexpr int kVeneerDistanceCheckMargin =
+ kVeneerNoProtectionFactor * kVeneerDistanceMargin;
int unresolved_branches_first_limit() const {
DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
@@ -2195,14 +2160,18 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
- PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
- : Assembler(isolate, reinterpret_cast<byte*>(start),
- count * kInstructionSize + kGap) {
- StartBlockPools();
- }
+ // This version will flush at destruction.
PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
- : Assembler(isolate, start, count * kInstructionSize + kGap) {
+ : PatchingAssembler(IsolateData(isolate), start, count) {
+ CHECK_NOT_NULL(isolate);
+ isolate_ = isolate;
+ }
+
+ // This version will not flush.
+ PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
+ : Assembler(isolate_data, start, count * kInstructionSize + kGap),
+ isolate_(nullptr) {
// Block constant pool emission.
StartBlockPools();
}
@@ -2217,13 +2186,16 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- Assembler::FlushICache(isolate(), buffer_, length);
+ if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
}
// See definition of PatchAdrFar() for details.
- static const int kAdrFarPatchableNNops = 2;
- static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
+ static constexpr int kAdrFarPatchableNNops = 2;
+ static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
+
+ private:
+ Isolate* isolate_;
};
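PatchingAssembler now mirrors that split, with one practical difference spelled out in the comments above: the Isolate* constructor remembers the isolate and flushes the instruction cache for the patched range on destruction, while the IsolateData constructor leaves isolate_ null and skips the flush. A short usage sketch; start, target and the instruction count are illustrative:

    {
      // Flushing form: icache is flushed for the patched range on destruction.
      PatchingAssembler patcher(isolate, start, 2);
      patcher.dc64(reinterpret_cast<uintptr_t>(target));
    }
    {
      // Non-flushing form: the caller is responsible for any required flush.
      PatchingAssembler patcher(IsolateData(isolate), start, 2);
      patcher.dc64(reinterpret_cast<uintptr_t>(target));
    }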
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 082565f20c..ec00581566 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -4,20 +4,25 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/code-stubs.h"
#include "src/api-arguments.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/frames-arm64.h"
+#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/counters.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/arm64/code-stubs-arm64.h"
-#include "src/arm64/frames-arm64.h"
+#include "src/arm64/code-stubs-arm64.h" // Cannot be the first include.
namespace v8 {
namespace internal {
@@ -1264,223 +1269,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec);
+ // This case is handled prior to the RegExpExecStub call.
+ __ Abort(kUnexpectedRegExpExecCall);
#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // jssp[0]: last_match_info (expected JSArray)
- // jssp[8]: previous index
- // jssp[16]: subject string
- // jssp[24]: JSRegExp object
- Label runtime;
-
- // Use of registers for this function.
-
- // Variable registers:
- // x10-x13 used as scratch registers
- // w0 string_type type of subject string
- // x2 jsstring_length subject string length
- // x3 jsregexp_object JSRegExp object
- // w4 string_encoding Latin1 or UC16
- // w5 sliced_string_offset if the string is a SlicedString
- // offset to the underlying string
- // w6 string_representation groups attributes of the string:
- // - is a string
- // - type of the string
- // - is a short external string
- Register string_type = w0;
- Register jsstring_length = x2;
- Register jsregexp_object = x3;
- Register string_encoding = w4;
- Register sliced_string_offset = w5;
- Register string_representation = w6;
-
- // These are in callee save registers and will be preserved by the call
- // to the native RegExp code, as this code is called using the normal
- // C calling convention. When calling directly from generated code the
- // native RegExp code will not do a GC and therefore the content of
- // these registers are safe to use after the call.
-
- // x19 subject subject string
- // x20 regexp_data RegExp data (FixedArray)
- // x21 last_match_info_elements info relative to the last match
- // (FixedArray)
- // x22 code_object generated regexp code
- Register subject = x19;
- Register regexp_data = x20;
- Register last_match_info_elements = x21;
- Register code_object = x22;
-
- // Stack frame.
- // jssp[00]: last_match_info (JSArray)
- // jssp[08]: previous index
- // jssp[16]: subject string
- // jssp[24]: JSRegExp object
-
- const int kLastMatchInfoOffset = 0 * kPointerSize;
- const int kPreviousIndexOffset = 1 * kPointerSize;
- const int kSubjectOffset = 2 * kPointerSize;
- const int kJSRegExpOffset = 3 * kPointerSize;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ Mov(x10, address_of_regexp_stack_memory_size);
- __ Ldr(x10, MemOperand(x10));
- __ Cbz(x10, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- DCHECK(jssp.Is(__ StackPointer()));
- __ Peek(jsregexp_object, kJSRegExpOffset);
- __ JumpIfSmi(jsregexp_object, &runtime);
- __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- __ Tst(regexp_data, kSmiTagMask);
- __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
- __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
- __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
- }
-
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
- __ B(ne, &runtime);
-
- // Check that the number of captures fit in the static offsets vector buffer.
- // We have always at least one capture for the whole match, plus additional
- // ones due to capturing parentheses. A capture takes 2 registers.
- // The number of capture registers then is (number_of_captures + 1) * 2.
- __ Ldrsw(x10,
- UntagSmiFieldMemOperand(regexp_data,
- JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // number_of_captures * 2 <= offsets vector size - 2
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ Add(x10, x10, x10);
- __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
- __ B(hi, &runtime);
-
- // Initialize offset for possibly sliced string.
- __ Mov(sliced_string_offset, 0);
-
- DCHECK(jssp.Is(__ StackPointer()));
- __ Peek(subject, kSubjectOffset);
- __ JumpIfSmi(subject, &runtime);
-
- __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
-
- // Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (4).
- // (2) Sequential or cons? If not, go to (5).
- // (3) Cons string. If the string is flat, replace subject with first string
- // and go to (1). Otherwise bail out to runtime.
- // (4) Sequential string. Load regexp code according to encoding.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (5) Long external string? If not, go to (7).
- // (6) External string. Make it, offset-wise, look like a sequential string.
- // Go to (4).
- // (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced or thin string. Replace subject with parent. Go to (1).
-
- Label check_underlying; // (1)
- Label seq_string; // (4)
- Label not_seq_nor_cons; // (5)
- Label external_string; // (6)
- Label not_long_external; // (7)
-
- __ Bind(&check_underlying);
- __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
-
- // (1) Sequential string? If yes, go to (4).
- __ And(string_representation,
- string_type,
- kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask);
- // We depend on the fact that Strings of type
- // SeqString and not ShortExternalString are defined
- // by the following pattern:
- // string_type: 0XX0 XX00
- // ^ ^ ^^
- // | | ||
- // | | is a SeqString
- // | is not a short external String
- // is a String
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ Cbz(string_representation, &seq_string); // Go to (4).
-
- // (2) Sequential or cons? If not, go to (5).
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kThinStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ Cmp(string_representation, kExternalStringTag);
- __ B(ge, &not_seq_nor_cons); // Go to (5).
-
- // (3) Cons string. Check that it's flat.
- __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
- // Replace subject with first string.
- __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- __ B(&check_underlying);
-
- // (4) Sequential string. Load regexp code according to encoding.
- __ Bind(&seq_string);
-
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- DCHECK(jssp.Is(__ StackPointer()));
- __ Peek(x10, kPreviousIndexOffset);
- __ JumpIfNotSmi(x10, &runtime);
- __ Cmp(jsstring_length, x10);
- __ B(ls, &runtime);
-
- // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
- // before entering the exit frame.
- __ SmiUntag(x1, x10);
-
- // The fourth bit determines the string encoding in string_type.
- STATIC_ASSERT(kOneByteStringTag == 0x08);
- STATIC_ASSERT(kTwoByteStringTag == 0x00);
- STATIC_ASSERT(kStringEncodingMask == 0x08);
-
- // Find the code object based on the assumptions above.
- // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
- // of kPointerSize to reach the latter.
- STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
- JSRegExp::kDataUC16CodeOffset);
- __ Mov(x10, kPointerSize);
- // We will need the encoding later: Latin1 = 0x08
- // UC16 = 0x00
- __ Ands(string_encoding, string_type, kStringEncodingMask);
- __ CzeroX(x10, ne);
- __ Add(x10, regexp_data, x10);
- __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
-
- // (E) Carry on. String handling is done.
-
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(code_object, &runtime);
-
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
- x10,
- x11);
-
// Isolates: note we add an additional parameter here (isolate pointer).
__ EnterExitFrame(false, x10, 1);
DCHECK(csp.Is(__ StackPointer()));
@@ -1496,50 +1287,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x10, ExternalReference::isolate_address(isolate()));
__ Poke(x10, kPointerSize);
- Register length = w11;
- Register previous_index_in_bytes = w12;
- Register start = x13;
-
- // Load start of the subject string.
- __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
- // Load the length from the original subject string from the previous stack
- // frame. Therefore we have to use fp, which points exactly to two pointer
- // sizes below the previous sp. (Because creating a new stack frame pushes
- // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
- __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
-
- // Handle UC16 encoding, two bytes make one character.
- // string_encoding: if Latin1: 0x08
- // if UC16: 0x00
- STATIC_ASSERT(kStringEncodingMask == 0x08);
- __ Ubfx(string_encoding, string_encoding, 3, 1);
- __ Eor(string_encoding, string_encoding, 1);
- // string_encoding: if Latin1: 0
- // if UC16: 1
-
- // Convert string positions from characters to bytes.
- // Previous index is in x1.
- __ Lsl(previous_index_in_bytes, w1, string_encoding);
- __ Lsl(length, length, string_encoding);
- __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
-
// Argument 1 (x0): Subject string.
- __ Mov(x0, subject);
+ CHECK(x0.is(RegExpExecDescriptor::StringRegister()));
// Argument 2 (x1): Previous index, already there.
+ CHECK(x1.is(RegExpExecDescriptor::LastIndexRegister()));
- // Argument 3 (x2): Get the start of input.
- // Start of input = start of string + previous index + substring offset
- // (0 if the string
- // is not sliced).
- __ Add(w10, previous_index_in_bytes, sliced_string_offset);
- __ Add(x2, start, Operand(w10, UXTW));
-
- // Argument 4 (x3):
- // End of input = start of input + (length of input - previous index)
- __ Sub(w10, length, previous_index_in_bytes);
- __ Add(x3, x2, Operand(w10, UXTW));
+ // Argument 3 (x2): Input start.
+ // Argument 4 (x3): Input end.
+ CHECK(x2.is(RegExpExecDescriptor::StringStartRegister()));
+ CHECK(x3.is(RegExpExecDescriptor::StringEndRegister()));
// Argument 5 (x4): static offsets vector buffer.
__ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
@@ -1550,6 +1307,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x5, 0);
// Argument 7 (x6): Start (high end) of backtracking stack memory area.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Mov(x10, address_of_regexp_stack_memory_address);
__ Ldr(x10, MemOperand(x10));
__ Mov(x11, address_of_regexp_stack_memory_size);
@@ -1560,184 +1321,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x7, 1);
// Locate the code entry and call it.
+ Register code_object = RegExpExecDescriptor::CodeRegister();
__ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, code_object);
__ LeaveExitFrame(false, x10, true);
- // The generated regexp code returns an int32 in w0.
- Label failure, exception;
- __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
- __ CompareAndBranch(w0,
- NativeRegExpMacroAssembler::EXCEPTION,
- eq,
- &exception);
- __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
-
- // Success: process the result from the native regexp code.
- Register number_of_capture_registers = x12;
-
- // Calculate number of capture registers (number_of_captures + 1) * 2
- // and store it in the last match info.
- __ Ldrsw(x10,
- UntagSmiFieldMemOperand(regexp_data,
- JSRegExp::kIrregexpCaptureCountOffset));
- __ Add(x10, x10, x10);
- __ Add(number_of_capture_registers, x10, 2);
-
- // Check that the last match info is a FixedArray.
- DCHECK(jssp.Is(__ StackPointer()));
- __ Peek(last_match_info_elements, kLastMatchInfoOffset);
- __ JumpIfSmi(last_match_info_elements, &runtime);
-
- // Check that the object has fast elements.
- __ Ldr(x10,
- FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
-
- // Check that the last match info has space for the capture registers and the
- // additional information (overhead).
- // (number_of_captures + 1) * 2 + overhead <= last match info size
- // (number_of_captures * 2) + 2 + overhead <= last match info size
- // number_of_capture_registers + overhead <= last match info size
- __ Ldrsw(x10,
- UntagSmiFieldMemOperand(last_match_info_elements,
- FixedArray::kLengthOffset));
- __ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
- __ Cmp(x11, x10);
- __ B(gt, &runtime);
-
- // Store the capture count.
- __ SmiTag(x10, number_of_capture_registers);
- __ Str(x10, FieldMemOperand(last_match_info_elements,
- RegExpMatchInfo::kNumberOfCapturesOffset));
- // Store last subject and last input.
- __ Str(subject, FieldMemOperand(last_match_info_elements,
- RegExpMatchInfo::kLastSubjectOffset));
- // Use x10 as the subject string in order to only need
- // one RecordWriteStub.
- __ Mov(x10, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpMatchInfo::kLastSubjectOffset, x10, x11,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Str(subject, FieldMemOperand(last_match_info_elements,
- RegExpMatchInfo::kLastInputOffset));
- __ Mov(x10, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpMatchInfo::kLastInputOffset, x10, x11,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
-
- Register last_match_offsets = x13;
- Register offsets_vector_index = x14;
- Register current_offset = x15;
-
- // Get the static offsets vector filled by the native regexp code
- // and fill the last match info.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate());
- __ Mov(offsets_vector_index, address_of_static_offsets_vector);
-
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // iterates down to zero (inclusive).
- __ Add(last_match_offsets, last_match_info_elements,
- RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
- __ Bind(&next_capture);
- __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
- __ B(mi, &done);
- // Read two 32 bit values from the static offsets vector buffer into
- // an X register
- __ Ldr(current_offset,
- MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
- // Store the smi values in the last match info.
- __ SmiTag(x10, current_offset);
- // Clearing the 32 bottom bits gives us a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ Bic(x11, current_offset, kSmiShiftMask);
- __ Stp(x10,
- x11,
- MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
- __ B(&next_capture);
- __ Bind(&done);
-
- // Return last match info.
- __ Mov(x0, last_match_info_elements);
- // Drop the 4 arguments of the stub from the stack.
- __ Drop(4);
- __ Ret();
-
- __ Bind(&exception);
- Register exception_value = x0;
- // A stack overflow (on the backtrack stack) may have occured
- // in the RegExp code but no exception has been created yet.
- // If there is no pending exception, handle that in the runtime system.
- __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
- __ Mov(x11,
- Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- __ Ldr(exception_value, MemOperand(x11));
- __ Cmp(x10, exception_value);
- __ B(eq, &runtime);
-
- // For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow);
-
- __ Bind(&failure);
- __ Mov(x0, Operand(isolate()->factory()->null_value()));
- // Drop the 4 arguments of the stub from the stack.
- __ Drop(4);
+ // Return the smi-tagged result.
+ __ SmiTag(x0);
__ Ret();
-
- __ Bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec);
-
- // Deferred code for string handling.
- // (5) Long external string? If not, go to (7).
- __ Bind(&not_seq_nor_cons);
- // Compare flags are still set.
- __ B(ne, &not_long_external); // Go to (7).
-
- // (6) External string. Make it, offset-wise, look like a sequential string.
- __ Bind(&external_string);
- if (masm->emit_debug_code()) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Tst(x10, kIsIndirectStringMask);
- __ Check(eq, kExternalStringExpectedButNotFound);
- __ And(x10, x10, kStringRepresentationMask);
- __ Cmp(x10, 0);
- __ Check(ne, kExternalStringExpectedButNotFound);
- }
- __ Ldr(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ B(&seq_string); // Go to (4).
-
- // (7) If this is a short external string or not a string, bail out to
- // runtime.
- __ Bind(&not_long_external);
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ TestAndBranchIfAnySet(string_representation,
- kShortExternalStringMask | kIsNotStringMask,
- &runtime);
-
- // (8) Sliced or thin string. Replace subject with parent.
- Label thin_string;
- __ Cmp(string_representation, kThinStringTag);
- __ B(eq, &thin_string);
- __ Ldr(sliced_string_offset,
- UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- __ B(&check_underlying); // Go to (1).
-
- __ bind(&thin_string);
- __ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
- __ B(&check_underlying); // Go to (1).
#endif
}
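The net effect of this large deletion is that RegExpExecStub no longer performs string flattening, encoding dispatch, or last-match-info bookkeeping; its arguments now arrive pre-computed in the registers fixed by RegExpExecDescriptor, and the stub keeps only the exit frame, the call into the generated regexp code, and smi-tagging of the int32 result. The register contract asserted by the CHECKs above, summarized as a sketch:

    // From the CHECK()s in the stub (arm64 mapping):
    //   x0  RegExpExecDescriptor::StringRegister()       subject string
    //   x1  RegExpExecDescriptor::LastIndexRegister()    previous index (untagged)
    //   x2  RegExpExecDescriptor::StringStartRegister()  start of input
    //   x3  RegExpExecDescriptor::StringEndRegister()    end of input
    //       RegExpExecDescriptor::CodeRegister()         generated regexp Code object
    //   w0  on return: raw int32 result, smi-tagged before __ Ret()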
@@ -2509,6 +2102,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
+RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
+ Register address,
+ Register scratch)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved),
+ saved_fp_regs_(kCallerSavedFP) {
+ DCHECK(!AreAliased(scratch, object, address));
+
+ // The SaveCallerSaveRegisters method needs to save caller-saved
+ // registers, but we don't bother saving MacroAssembler scratch registers.
+ saved_regs_.Remove(MacroAssembler::DefaultTmpList());
+ saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
+
+ // We would like to require more scratch registers for this stub,
+ // but the number of registers comes down to the ones used in
+ // FullCodeGen::SetVar(), which is architecture independent.
+ // We allocate 2 extra scratch registers that we'll save on the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
+}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// We need some extra registers for this stub, they have been allocated
@@ -2566,6 +2190,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 13e1b9d234..a5ae2d3dee 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -130,9 +130,7 @@ class RecordWriteStub: public PlatformCodeStub {
// so effectively a nop.
static void Patch(Code* stub, Mode mode) {
// We are going to patch the two first instructions of the stub.
- PatchingAssembler patcher(
- stub->GetIsolate(),
- reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
@@ -172,37 +170,7 @@ class RecordWriteStub: public PlatformCodeStub {
// The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch)
- : object_(object),
- address_(address),
- scratch0_(scratch),
- saved_regs_(kCallerSaved),
- saved_fp_regs_(kCallerSavedFP) {
- DCHECK(!AreAliased(scratch, object, address));
-
- // The SaveCallerSaveRegisters method needs to save caller-saved
- // registers, but we don't bother saving MacroAssembler scratch registers.
- saved_regs_.Remove(MacroAssembler::DefaultTmpList());
- saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
-
- // We would like to require more scratch registers for this stub,
- // but the number of registers comes down to the ones used in
- // FullCodeGen::SetVar(), which is architecture independent.
- // We allocate 2 extra scratch registers that we'll save on the stack.
- CPURegList pool_available = GetValidRegistersForAllocation();
- CPURegList used_regs(object, address, scratch);
- pool_available.Remove(used_regs);
- scratch1_ = Register(pool_available.PopLowestIndex());
- scratch2_ = Register(pool_available.PopLowestIndex());
-
- // The scratch registers will be restored by other means so we don't need
- // to save them with the other caller saved registers.
- saved_regs_.Remove(scratch0_);
- saved_regs_.Remove(scratch1_);
- saved_regs_.Remove(scratch2_);
- }
+ RegisterAllocation(Register object, Register address, Register scratch);
void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as
@@ -288,9 +256,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 4fb9a2d939..60375300b8 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 65b8b30610..ddaa30e984 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -199,7 +199,14 @@ const unsigned kFloatExponentBits = 8;
V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \
- V_(CRm, 11, 8, Bits)
+ V_(CRm, 11, 8, Bits) \
+ \
+ /* Load-/store-exclusive */ \
+ V_(LoadStoreXLoad, 22, 22, Bits) \
+ V_(LoadStoreXNotExclusive, 23, 23, Bits) \
+ V_(LoadStoreXAcquireRelease, 15, 15, Bits) \
+ V_(LoadStoreXSizeLog2, 31, 30, Bits) \
+ V_(LoadStoreXPair, 21, 21, Bits)
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \
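The new entries in the instruction-field list give the decoder named accessors for the load/store-exclusive encodings. A small sketch of how they might be consulted, assuming the usual pattern where V_(Name, msb, lsb, Bits) generates an Instruction::Name() accessor; the helper itself is illustrative:

    bool IsExclusiveLoad(const Instruction* instr) {
      return instr->LoadStoreXNotExclusive() == 0 &&  // bit 23 clear: exclusive form
             instr->LoadStoreXLoad() == 1;            // bit 22 set: load
    }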
@@ -857,7 +864,7 @@ enum LoadStoreRegisterOffset {
#undef LOAD_STORE_REGISTER_OFFSET
};
-// Load/store acquire/release
+// Load/store acquire/release.
enum LoadStoreAcquireReleaseOp {
LoadStoreAcquireReleaseFixed = 0x08000000,
LoadStoreAcquireReleaseFMask = 0x3F000000,
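As a side note on the load-/store-exclusive field list added above: each entry is a (name, high-bit, low-bit) triple, and the generated accessors amount to a mask-and-shift extraction. A minimal sketch of that extraction; ExtractBits and the two helpers around it are hypothetical names used here only for illustration, not part of the patch:

    #include <cstdint>

    // Extract bits [hi:lo] of a 32-bit instruction word.
    static inline uint32_t ExtractBits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1u);
    }

    // LoadStoreXLoad is bit 22 (set for the load forms); LoadStoreXSizeLog2 is
    // bits 31:30, so the access size in bytes is 1 << size_log2.
    static inline bool IsLoadForm(uint32_t instr) {
      return ExtractBits(instr, 22, 22) == 1;
    }
    static inline unsigned AccessSizeInBytes(uint32_t instr) {
      return 1u << ExtractBits(instr, 31, 30);
    }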
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 0bedceb6ed..901259f2b4 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
+#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
+#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
@@ -94,11 +97,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
- // Save all allocatable floating point registers.
- CPURegList saved_fp_registers(
+ // Save all allocatable double registers.
+ CPURegList saved_double_registers(
CPURegister::kFPRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
- __ PushCPURegList(saved_fp_registers);
+ __ PushCPURegList(saved_double_registers);
+
+ // Save all allocatable float registers.
+ CPURegList saved_float_registers(
+ CPURegister::kFPRegister, kSRegSizeInBits,
+ RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
+ __ PushCPURegList(saved_float_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
@@ -110,10 +119,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
- (saved_fp_registers.Count() * kDRegSize);
+ (saved_double_registers.Count() * kDRegSize) +
+ (saved_float_registers.Count() * kSRegSize);
// Floating point registers are saved on the stack above core registers.
- const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
+ const int kFloatRegistersOffset = saved_registers.Count() * kXRegSize;
+ const int kDoubleRegistersOffset =
+ kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize;
// Get the bailout id from the stack.
Register bailout_id = x2;
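For reference, the save area these offsets describe looks like this, counting up from the stack pointer; a sketch assuming the core registers are pushed after the float and double registers (as in the surrounding code), with N, F and D standing for saved_registers.Count(), saved_float_registers.Count() and saved_double_registers.Count():

    // [sp,                          sp + N * kXRegSize)            core (X) registers
    // [sp + kFloatRegistersOffset,  sp + kDoubleRegistersOffset)   float (S) registers
    // [sp + kDoubleRegistersOffset, sp + kSavedRegistersAreaSize)  double (D) registers
    //
    // kFloatRegistersOffset   == N * kXRegSize
    // kDoubleRegistersOffset  == kFloatRegistersOffset + F * kSRegSize
    // kSavedRegistersAreaSize == N * kXRegSize + F * kSRegSize + D * kDRegSize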
@@ -165,17 +177,28 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Str(x2, MemOperand(x1, offset));
}
- // Copy FP registers to the input frame.
- CPURegList copy_fp_to_input = saved_fp_registers;
- for (int i = 0; i < saved_fp_registers.Count(); i++) {
- int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+ // Copy double registers to the input frame.
+ CPURegList copy_double_to_input = saved_double_registers;
+ for (int i = 0; i < saved_double_registers.Count(); i++) {
+ int src_offset = kDoubleRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
- CPURegister reg = copy_fp_to_input.PopLowestIndex();
+ CPURegister reg = copy_double_to_input.PopLowestIndex();
int dst_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize);
__ Str(x2, MemOperand(x1, dst_offset));
}
+ // Copy float registers to the input frame.
+ CPURegList copy_float_to_input = saved_float_registers;
+ for (int i = 0; i < saved_float_registers.Count(); i++) {
+ int src_offset = kFloatRegistersOffset + (i * kFloatSize);
+ __ Peek(w2, src_offset);
+ CPURegister reg = copy_float_to_input.PopLowestIndex();
+ int dst_offset =
+ FrameDescription::float_registers_offset() + (reg.code() * kFloatSize);
+ __ Str(w2, MemOperand(x1, dst_offset));
+ }
+
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
@@ -241,11 +264,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
- !saved_fp_registers.IncludesAliasOf(fp_zero) &&
- !saved_fp_registers.IncludesAliasOf(fp_scratch));
- while (!saved_fp_registers.IsEmpty()) {
- const CPURegister reg = saved_fp_registers.PopLowestIndex();
+ DCHECK(!saved_double_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+ !saved_double_registers.IncludesAliasOf(fp_zero) &&
+ !saved_double_registers.IncludesAliasOf(fp_scratch));
+ while (!saved_double_registers.IsEmpty()) {
+ const CPURegister reg = saved_double_registers.PopLowestIndex();
int src_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize);
__ Ldr(reg, MemOperand(x1, src_offset));
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 8e022b1690..e3ef4595d8 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -916,10 +916,10 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
const char *mnemonic = "unimplemented";
- const char *form = "'Wt, ['Xn]";
- const char *form_x = "'Xt, ['Xn]";
- const char *form_stlx = "'Ws, 'Wt, ['Xn]";
- const char *form_stlx_x = "'Ws, 'Xt, ['Xn]";
+ const char* form = "'Wt, ['Xns]";
+ const char* form_x = "'Xt, ['Xns]";
+ const char* form_stlx = "'Ws, 'Wt, ['Xns]";
+ const char* form_stlx_x = "'Ws, 'Xt, ['Xns]";
switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
case LDAXR_b: mnemonic = "ldaxrb"; break;
@@ -938,7 +938,8 @@ void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
- default: form = "(LoadStoreAcquireReleaseMask)";
+ default:
+ form = "(LoadStoreAcquireRelease)";
}
Format(instr, mnemonic, form);
}
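With the form strings above, the acquire/release encodings disassemble along these lines (illustrative output only; exact column spacing depends on Format(), and the 'Xns substitution is assumed to print a base-register value of 31 as the stack pointer rather than as xzr):

    ldar   w0, [x1]
    ldaxrb w2, [x3]
    stlr   x0, [x1]
    stlxr  w2, w0, [x1]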
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index bcdcffb960..09a3ccb709 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm64/assembler-arm64-inl.h"
#include "src/eh-frame.h"
namespace v8 {
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index d23533d8bc..4b419d6dbd 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -218,22 +218,22 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
-
-void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) {
+void Instruction::SetImmPCOffsetTarget(Assembler::IsolateData isolate_data,
+ Instruction* target) {
if (IsPCRelAddressing()) {
- SetPCRelImmTarget(isolate, target);
+ SetPCRelImmTarget(isolate_data, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
- SetUnresolvedInternalReferenceImmTarget(isolate, target);
+ SetUnresolvedInternalReferenceImmTarget(isolate_data, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
}
}
-
-void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
+void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
+ Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@@ -243,7 +243,7 @@ void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
- PatchingAssembler patcher(isolate, this,
+ PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this),
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@@ -283,9 +283,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
-
-void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
- Instruction* target) {
+void Instruction::SetUnresolvedInternalReferenceImmTarget(
+ Assembler::IsolateData isolate_data, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@@ -294,7 +293,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
- PatchingAssembler patcher(isolate, this, 2);
+ PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this), 2);
patcher.brk(high16);
patcher.brk(low16);
}
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index db4e3d03a8..6110a14722 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -7,13 +7,13 @@
#include "src/arm64/constants-arm64.h"
#include "src/arm64/utils-arm64.h"
+#include "src/assembler.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
@@ -373,8 +373,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
- void SetImmPCOffsetTarget(Isolate* isolate, Instruction* target);
- void SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
+ void SetImmPCOffsetTarget(AssemblerBase::IsolateData isolate_data,
+ Instruction* target);
+ void SetUnresolvedInternalReferenceImmTarget(AssemblerBase::IsolateData,
Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@@ -411,7 +412,8 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
- void SetPCRelImmTarget(Isolate* isolate, Instruction* target);
+ void SetPCRelImmTarget(AssemblerBase::IsolateData isolate_data,
+ Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index dad89fe6bf..c6e27f8ee3 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -61,39 +61,39 @@ typedef struct {
CounterType type;
} CounterDescriptor;
-
static const CounterDescriptor kCounterList[] = {
- {"Instruction", Cumulative},
-
- {"Move Immediate", Gauge},
- {"Add/Sub DP", Gauge},
- {"Logical DP", Gauge},
- {"Other Int DP", Gauge},
- {"FP DP", Gauge},
-
- {"Conditional Select", Gauge},
- {"Conditional Compare", Gauge},
-
- {"Unconditional Branch", Gauge},
- {"Compare and Branch", Gauge},
- {"Test and Branch", Gauge},
- {"Conditional Branch", Gauge},
-
- {"Load Integer", Gauge},
- {"Load FP", Gauge},
- {"Load Pair", Gauge},
- {"Load Literal", Gauge},
-
- {"Store Integer", Gauge},
- {"Store FP", Gauge},
- {"Store Pair", Gauge},
-
- {"PC Addressing", Gauge},
- {"Other", Gauge},
- {"SP Adjust", Gauge},
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+ {"Load Acquire", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+ {"Store Release", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"SP Adjust", Gauge},
};
-
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 988f7e935d..c73d371e8f 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -57,6 +57,11 @@ const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
+const Register RegExpExecDescriptor::StringRegister() { return x0; }
+const Register RegExpExecDescriptor::LastIndexRegister() { return x1; }
+const Register RegExpExecDescriptor::StringStartRegister() { return x2; }
+const Register RegExpExecDescriptor::StringEndRegister() { return x3; }
+const Register RegExpExecDescriptor::CodeRegister() { return x8; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@@ -310,46 +315,6 @@ void StringAddDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void KeyedDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor noInlineDescriptor =
- PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- x2, // key
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &noInlineDescriptor);
-}
-
-
-void NamedDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor noInlineDescriptor =
- PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- x2, // name
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &noInlineDescriptor);
-}
-
-
-void CallHandlerDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- x0, // receiver
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
-}
-
-
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
@@ -388,7 +353,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
@@ -398,7 +363,7 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
@@ -410,8 +375,8 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsThenConstructArrayDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x1, // target to call checked to be Array function
@@ -436,7 +401,8 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x0, // the value to pass to the generator
x1, // the JSGeneratorObject to resume
- x2 // the resume mode (tagged)
+ x2, // the resume mode (tagged)
+ x3 // SuspendFlags (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index f19d6909f8..e2fbc8f4af 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -12,9 +12,8 @@
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
#include "src/base/bits.h"
-
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -37,12 +36,6 @@ MemOperand UntagSmiMemOperand(Register object, int offset) {
}
-Handle<Object> MacroAssembler::CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
-}
-
-
void MacroAssembler::And(const Register& rd,
const Register& rn,
const Operand& operand) {
@@ -1239,6 +1232,14 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
+void MacroAssembler::AlignAndSetCSPForFrame() {
+ int sp_alignment = ActivationFrameAlignment();
+ // AAPCS64 mandates at least 16-byte alignment.
+ DCHECK(sp_alignment >= 16);
+ DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
+ Bic(csp, StackPointer(), sp_alignment - 1);
+ SetStackPointer(csp);
+}
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(sp_));
@@ -1441,14 +1442,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
Bic(untagged_obj, obj, kHeapObjectTag);
}
-
-void MacroAssembler::IsObjectNameType(Register object,
- Register type,
- Label* fail) {
- CompareObjectType(object, type, type, LAST_NAME_TYPE);
- B(hi, fail);
-}
-
+void MacroAssembler::jmp(Label* L) { B(L); }
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
@@ -1477,6 +1471,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
Push(tmp);
}
+void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 549db5d048..5edcd7b044 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -4,16 +4,19 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
+#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/heap/heap-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/arm64/frames-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
+#include "src/arm64/macro-assembler-arm64-inl.h"
+#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
namespace v8 {
namespace internal {
@@ -21,23 +24,23 @@ namespace internal {
// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
+MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
unsigned buffer_size,
CodeObjectRequired create_code_object)
- : Assembler(arg_isolate, buffer, buffer_size),
+ : Assembler(isolate, buffer, buffer_size),
generating_stub_(false),
#if DEBUG
allow_macro_instructions_(true),
#endif
has_frame_(false),
+ isolate_(isolate),
use_real_aborts_(true),
sp_(jssp),
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
- Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
+ Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
}
}
@@ -1232,6 +1235,12 @@ void MacroAssembler::PopPostamble(Operand total_size) {
}
}
+void MacroAssembler::PushPreamble(int count, int size) {
+ PushPreamble(count * size);
+}
+void MacroAssembler::PopPostamble(int count, int size) {
+ PopPostamble(count * size);
+}
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
@@ -1428,6 +1437,21 @@ void MacroAssembler::LoadHeapObject(Register result,
Mov(result, Operand(object));
}
+void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ DCHECK(object->IsSmi());
+ Mov(result, Operand(object));
+ }
+}
+
+void MacroAssembler::Move(Register dst, Register src) { Mov(dst, src); }
+void MacroAssembler::Move(Register dst, Handle<Object> x) {
+ LoadObject(dst, x);
+}
+void MacroAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
@@ -1595,20 +1619,6 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
}
-void MacroAssembler::AssertName(Register object) {
- if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAName);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
-
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(temp, temp, LAST_NAME_TYPE);
- Check(ls, kOperandIsNotAName);
- }
-}
-
-
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
@@ -1634,31 +1644,36 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object) {
- if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
+void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
+ // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+ if (!emit_debug_code()) return;
+ AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+ // Load map
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
- Check(eq, kOperandIsNotAGeneratorObject);
- }
-}
+ // Load instance type
+ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
-void MacroAssembler::AssertReceiver(Register object) {
- if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
+ Label async, do_check;
+ STATIC_ASSERT(static_cast<int>(SuspendFlags::kGeneratorTypeMask) == 4);
+ DCHECK(!temp.is(flags));
+ B(&async, reg_bit_set, flags, 2);
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+ // Check if JSGeneratorObject
+ Cmp(temp, JS_GENERATOR_OBJECT_TYPE);
+ jmp(&do_check);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
- Check(hs, kOperandIsNotAReceiver);
- }
-}
+ bind(&async);
+ // Check if JSAsyncGeneratorObject
+ Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ bind(&do_check);
+ // Restore generator object to register and perform assertion
+ Check(eq, kOperandIsNotAGeneratorObject);
+}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
@@ -1674,20 +1689,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(ne, kOperandIsASmiAndNotAString);
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
- Check(lo, kOperandIsNotAString);
- }
-}
-
-
void MacroAssembler::AssertPositiveOrZero(Register value) {
if (emit_debug_code()) {
Label done;
@@ -1698,28 +1699,6 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
-void MacroAssembler::AssertNotNumber(Register value) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- Tst(value, kSmiTagMask);
- Check(ne, kOperandIsANumber);
- Label done;
- JumpIfNotHeapNumber(value, &done);
- Abort(kOperandIsANumber);
- Bind(&done);
- }
-}
-
-void MacroAssembler::AssertNumber(Register value) {
- if (emit_debug_code()) {
- Label done;
- JumpIfSmi(value, &done);
- JumpIfHeapNumber(value, &done);
- Abort(kOperandIsNotANumber);
- Bind(&done);
- }
-}
-
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -3331,30 +3310,6 @@ void MacroAssembler::CheckMap(Register obj_map,
}
-void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
- Register scratch2, Handle<WeakCell> cell,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
- CmpWeakValue(scratch1, cell, scratch2);
- B(ne, &fail);
- Jump(success, RelocInfo::CODE_TARGET);
- Bind(&fail);
-}
-
-
-void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
- Register scratch) {
- Mov(scratch, Operand(cell));
- Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
- Cmp(value, scratch);
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Mov(value, Operand(cell));
Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -3384,7 +3339,6 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
DecodeField<Map::ElementsKindBits>(result);
}
-
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
@@ -3683,6 +3637,13 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopSafepointRegisters();
}
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+}
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
@@ -4082,20 +4043,6 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
}
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Label ok;
- Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
- JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
- JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
- JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
- Abort(kJSObjectWithFastElementsMapHasSlowElements);
- Bind(&ok);
- }
-}
-
void MacroAssembler::AssertIsString(const Register& object) {
if (emit_debug_code()) {
@@ -4584,6 +4531,13 @@ CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
return reg;
}
+MemOperand ContextMemOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
+}
#define __ masm->
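ContextMemOperand and NativeContextMemOperand keep their previous behaviour, now defined just above instead of inline in the header. A hypothetical use, loading the global proxy out of the native context (register choices are for illustration only; cp is assumed to hold the current context):

    __ Ldr(x2, NativeContextMemOperand());
    __ Ldr(x2, ContextMemOperand(x2, Context::GLOBAL_PROXY_INDEX));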
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 560a824c04..e60fbe33fe 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -167,7 +167,12 @@ class MacroAssembler : public Assembler {
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
CodeObjectRequired create_code_object);
- inline Handle<Object> CodeObject();
+ Isolate* isolate() const { return isolate_; }
+
+ Handle<Object> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
// Instruction set functions ------------------------------------------------
// Logical macros.
@@ -672,7 +677,7 @@ class MacroAssembler : public Assembler {
// This is a convenience method for pushing a single Handle<Object>.
inline void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+ inline void Push(Smi* smi);
// Aliases of Push and Pop, required for V8 compatibility.
inline void push(Register src) {
@@ -872,14 +877,7 @@ class MacroAssembler : public Assembler {
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
// current stack pointer.
- inline void AlignAndSetCSPForFrame() {
- int sp_alignment = ActivationFrameAlignment();
- // AAPCS64 mandates at least 16-byte alignment.
- DCHECK(sp_alignment >= 16);
- DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
- Bic(csp, StackPointer(), sp_alignment - 1);
- SetStackPointer(csp);
- }
+ inline void AlignAndSetCSPForFrame();
// Push the system stack pointer (csp) down to allow the same to be done to
// the current stack pointer (according to StackPointer()). This must be
@@ -923,23 +921,15 @@ class MacroAssembler : public Assembler {
void LoadHeapObject(Register dst, Handle<HeapObject> object);
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- DCHECK(object->IsSmi());
- Mov(result, Operand(object));
- }
- }
+ void LoadObject(Register result, Handle<Object> object);
static int SafepointRegisterStackIndex(int reg_code);
// This is required for compatibility with architecture independent code.
// Remove if not needed.
- inline void Move(Register dst, Register src) { Mov(dst, src); }
- inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
- inline void Move(Register dst, Smi* src) { Mov(dst, src); }
+ void Move(Register dst, Register src);
+ void Move(Register dst, Handle<Object> x);
+ void Move(Register dst, Smi* src);
void LoadInstanceDescriptors(Register map,
Register descriptors);
@@ -1004,38 +994,25 @@ class MacroAssembler : public Assembler {
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
- // Abort execution if argument is not a name, enabled via --debug-code.
- void AssertName(Register object);
-
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSGeneratorObject,
// enabled via --debug-code.
- void AssertGeneratorObject(Register object);
+ void AssertGeneratorObject(Register object, Register suspend_flags);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
- void AssertReceiver(Register object);
-
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
// Abort execution if argument is not a positive or zero integer, enabled via
// --debug-code.
void AssertPositiveOrZero(Register value);
- // Abort execution if argument is not a number (heap number or smi).
- void AssertNumber(Register value);
- void AssertNotNumber(Register value);
-
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@@ -1112,7 +1089,7 @@ class MacroAssembler : public Assembler {
// ---- Calling / Jumping helpers ----
// This is required for compatibility with architecture independent code.
- inline void jmp(Label* L) { B(L); }
+ inline void jmp(Label* L);
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
void TailCallStub(CodeStub* stub);
@@ -1445,16 +1422,6 @@ class MacroAssembler : public Assembler {
Label* fail,
SmiCheckType smi_check_type);
- // Check if the map of an object is equal to a specified weak map and branch
- // to a specified target if equal. Skip the smi check if not required
- // (object is known to be a heap object)
- void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
- Handle<WeakCell> cell, Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Compare the given value and the value of weak cell.
- void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -1485,13 +1452,6 @@ class MacroAssembler : public Assembler {
Heap::RootListIndex index,
Label* if_not_equal);
- // Load and check the instance type of an object for being a unique name.
- // Loads the type into the second argument register.
- // The object and type arguments can be the same register; in that case it
- // will be overwritten with the type.
- // Fall-through if the object was a string and jump on fail otherwise.
- inline void IsObjectNameType(Register object, Register type, Label* fail);
-
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
@@ -1665,15 +1625,11 @@ class MacroAssembler : public Assembler {
void PopSafepointRegistersAndDoubles();
// Store value in register src in the safepoint stack slot for register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst) {
- Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
- }
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src) {
- Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
- }
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
void CheckPageFlag(const Register& object, const Register& scratch, int mask,
Condition cc, Label* condition_met);
@@ -1808,7 +1764,6 @@ class MacroAssembler : public Assembler {
Register reg,
Heap::RootListIndex index,
BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
- void AssertFastElements(Register elements);
// Abort if the specified register contains the invalid color bit pattern.
// The pattern must be in bits [1:0] of 'reg' register.
@@ -1922,8 +1877,8 @@ class MacroAssembler : public Assembler {
void PushPreamble(Operand total_size);
void PopPostamble(Operand total_size);
- void PushPreamble(int count, int size) { PushPreamble(count * size); }
- void PopPostamble(int count, int size) { PopPostamble(count * size); }
+ void PushPreamble(int count, int size);
+ void PopPostamble(int count, int size);
private:
// The actual Push and Pop implementations. These don't generate any code
@@ -1977,6 +1932,7 @@ class MacroAssembler : public Assembler {
bool allow_macro_instructions_;
#endif
bool has_frame_;
+ Isolate* isolate_;
// The Abort method should call a V8 runtime function, but the CallRuntime
// mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
@@ -2118,15 +2074,8 @@ class UseScratchRegisterScope {
RegList old_availablefp_; // kFPRegister
};
-
-inline MemOperand ContextMemOperand(Register context, int index = 0) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-inline MemOperand NativeContextMemOperand() {
- return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
-}
-
+MemOperand ContextMemOperand(Register context, int index = 0);
+MemOperand NativeContextMemOperand();
// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 83b4cf7ee8..b536fd5e9c 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -10,10 +10,11 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
-#include "src/assembler.h"
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
@@ -55,6 +56,9 @@ TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
+// static
+base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
+ LAZY_INSTANCE_INITIALIZER;
// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
void Simulator::TraceSim(const char* format, ...) {
@@ -429,6 +433,7 @@ void Simulator::ResetState() {
Simulator::~Simulator() {
+ global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) {
delete instrument_;
@@ -1628,6 +1633,15 @@ void Simulator::LoadStoreHelper(Instruction* instr,
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t stack = 0;
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (instr->IsLoad()) {
+ local_monitor_.NotifyLoad(address);
+ } else {
+ local_monitor_.NotifyStore(address);
+ global_monitor_.Pointer()->NotifyStore_Locked(address,
+ &global_monitor_processor_);
+ }
+
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
// to be interrupted in between. The simulator is not thread safe and V8 does
@@ -1730,6 +1744,19 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
uintptr_t address2 = address + access_size;
uintptr_t stack = 0;
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (instr->IsLoad()) {
+ local_monitor_.NotifyLoad(address);
+ local_monitor_.NotifyLoad(address2);
+ } else {
+ local_monitor_.NotifyStore(address);
+ local_monitor_.NotifyStore(address2);
+ global_monitor_.Pointer()->NotifyStore_Locked(address,
+ &global_monitor_processor_);
+ global_monitor_.Pointer()->NotifyStore_Locked(address2,
+ &global_monitor_processor_);
+ }
+
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
// to be interrupted in between. The simulator is not thread safe and V8 does
@@ -1853,6 +1880,9 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(address);
+
switch (instr->Mask(LoadLiteralMask)) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
// LOG_FP_REGS), then print a more detailed log.
@@ -1906,8 +1936,108 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
}
}
+Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
+ switch (size) {
+ case 0:
+ return TransactionSize::None;
+ case 1:
+ return TransactionSize::Byte;
+ case 2:
+ return TransactionSize::HalfWord;
+ case 4:
+ return TransactionSize::Word;
+ default:
+ UNREACHABLE();
+ }
+ return TransactionSize::None;
+}
+
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
- // TODO(binji)
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+ LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
+ instr->Mask(LoadStoreAcquireReleaseMask));
+ int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
+ int32_t is_exclusive = (instr->LoadStoreXNotExclusive() == 0);
+ int32_t is_load = instr->LoadStoreXLoad();
+ int32_t is_pair = instr->LoadStoreXPair();
+ USE(is_acquire_release);
+ USE(is_pair);
+ DCHECK_NE(is_acquire_release, 0); // Non-acquire/release unimplemented.
+ DCHECK_EQ(is_pair, 0); // Pair unimplemented.
+ unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
+ uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
+ DCHECK_EQ(address % access_size, 0);
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (is_load != 0) {
+ if (is_exclusive) {
+ local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(
+ address, &global_monitor_processor_);
+ } else {
+ local_monitor_.NotifyLoad(address);
+ }
+ switch (op) {
+ case LDAR_b:
+ case LDAXR_b:
+ set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
+ break;
+ case LDAR_h:
+ case LDAXR_h:
+ set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
+ break;
+ case LDAR_w:
+ case LDAXR_w:
+ set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ LogRead(address, access_size, rt);
+ } else {
+ if (is_exclusive) {
+ unsigned rs = instr->Rs();
+ if (local_monitor_.NotifyStoreExcl(address,
+ get_transaction_size(access_size)) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ address, &global_monitor_processor_)) {
+ switch (op) {
+ case STLXR_b:
+ MemoryWrite<uint8_t>(address, wreg(rt));
+ break;
+ case STLXR_h:
+ MemoryWrite<uint16_t>(address, wreg(rt));
+ break;
+ case STLXR_w:
+ MemoryWrite<uint32_t>(address, wreg(rt));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ LogWrite(address, access_size, rt);
+ set_wreg(rs, 0);
+ } else {
+ set_wreg(rs, 1);
+ }
+ } else {
+ local_monitor_.NotifyStore(address);
+ global_monitor_.Pointer()->NotifyStore_Locked(address,
+ &global_monitor_processor_);
+ switch (op) {
+ case STLR_b:
+ MemoryWrite<uint8_t>(address, wreg(rt));
+ break;
+ case STLR_h:
+ MemoryWrite<uint16_t>(address, wreg(rt));
+ break;
+ case STLR_w:
+ MemoryWrite<uint32_t>(address, wreg(rt));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+ }
}
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
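The monitor bookkeeping above exists to model the usual load-exclusive / store-exclusive retry idiom in generated code. A minimal sketch of such a sequence, assuming the arm64 assembler exposes ldaxr/stlxr emitters with the operand order shown in the disassembler forms (w0/w2/x1 are arbitrary choices for illustration):

    Label retry;
    __ Bind(&retry);
    __ ldaxr(w0, x1);      // exclusive load-acquire from [x1]
    __ Add(w0, w0, 1);     // modify the loaded value
    __ stlxr(w2, w0, x1);  // exclusive store-release; w2 is 0 on success
    __ Cbnz(w2, &retry);   // a failed exclusive store must simply be retried

Because the simulator deliberately fails an otherwise valid exclusive store once in a while (see kMaxFailureCounter later in this file), the retry branch is needed even in single-threaded simulated code.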
@@ -3877,6 +4007,186 @@ void Simulator::DoPrintf(Instruction* instr) {
delete[] format;
}
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // A non exclusive load could clear the local monitor. As a result, it's
+ // most strict to unconditionally clear the local monitor on load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::Exclusive;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // A non exclusive store could clear the local monitor. As a result, it's
+ // most strict to unconditionally clear the local monitor on store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // It is allowed for a processor to require that the address matches
+ // exactly (B2.10.1), so this comparison does not mask addr.
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ // It is implementation-defined whether an exclusive store to a
+ // non-tagged address will update memory. As a result, it's most strict
+ // to unconditionally clear the local monitor.
+ Clear();
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
+
+Simulator::GlobalMonitor::Processor::Processor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::Processor::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::Exclusive;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
+ uintptr_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // A non exclusive store could clear the global monitor. As a result, it's
+ // most strict to unconditionally clear global monitors on store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
+ uintptr_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ if (is_requesting_processor) {
+ // It is allowed for a processor to require that the address matches
+ // exactly (B2.10.2), so this comparison does not mask addr.
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional stxr failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+ // Check the masked addresses when responding to a successful lock by
+ // another processor so the implementation is more conservative (i.e. the
+ // granularity of locking is as large as possible.)
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
+
+void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
+ Processor* processor) {
+ processor->NotifyLoadExcl_Locked(addr);
+ PrependProcessor_Locked(processor);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr,
+ Processor* processor) {
+ // Notify each processor of the store operation.
+ for (Processor* iter = head_; iter; iter = iter->next_) {
+ bool is_requesting_processor = iter == processor;
+ iter->NotifyStore_Locked(addr, is_requesting_processor);
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr,
+ Processor* processor) {
+ DCHECK(IsProcessorInLinkedList_Locked(processor));
+ if (processor->NotifyStoreExcl_Locked(addr, true)) {
+ // Notify the other processors that this StoreExcl succeeded.
+ for (Processor* iter = head_; iter; iter = iter->next_) {
+ if (iter != processor) {
+ iter->NotifyStoreExcl_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ Processor* processor) const {
+ return head_ == processor || processor->next_ || processor->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
+ if (IsProcessorInLinkedList_Locked(processor)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = processor;
+ }
+ processor->prev_ = nullptr;
+ processor->next_ = head_;
+ head_ = processor;
+}
+
+void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(processor)) {
+ return;
+ }
+
+ if (processor->prev_) {
+ processor->prev_->next_ = processor->next_;
+ } else {
+ head_ = processor->next_;
+ }
+ if (processor->next_) {
+ processor->next_->prev_ = processor->prev_;
+ }
+ processor->prev_ = nullptr;
+ processor->next_ = nullptr;
+}
#endif // USE_SIMULATOR
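The conservative granule check in Processor::NotifyStoreExcl_Locked compares addresses under kExclusiveTaggedAddrMask, which the header below defines as ~((1 << 11) - 1), i.e. the low 11 bits are ignored. A small worked example of what that means for a non-requesting processor:

    // kExclusiveTaggedAddrMask == ~0x7ff, so addresses are compared at 2KB
    // granularity:
    //   0x1000 & ~0x7ff == 0x1000
    //   0x17f8 & ~0x7ff == 0x1000   // same granule: the monitor is cleared
    //   0x1800 & ~0x7ff == 0x1800   // different granule: the monitor is kept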
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index c8c715a067..3016e616e4 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -865,6 +865,97 @@ class Simulator : public DecoderVisitor {
char* last_debugger_input() { return last_debugger_input_; }
char* last_debugger_input_;
+ // Synchronization primitives. See ARM DDI 0487A.a, B2.10. Pair types not
+ // implemented.
+ enum class MonitorAccess {
+ Open,
+ Exclusive,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Byte = 1,
+ HalfWord = 2,
+ Word = 4,
+ };
+
+ TransactionSize get_transaction_size(unsigned size);
+
+ // The least-significant bits of the address are ignored. The number of bits
+ // is implementation-defined, between 3 and 11. See ARM DDI 0487A.a, B2.10.3.
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreExcl only returns
+ // true if the exclusive store is allowed; the global monitor will still
+ // have to be checked to see whether the memory should be updated.
+ void NotifyLoad(uintptr_t addr);
+ void NotifyLoadExcl(uintptr_t addr, TransactionSize size);
+ void NotifyStore(uintptr_t addr);
+ bool NotifyStoreExcl(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ GlobalMonitor();
+
+ class Processor {
+ public:
+ Processor();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadExcl_Locked(uintptr_t addr);
+ void NotifyStore_Locked(uintptr_t addr, bool is_requesting_processor);
+ bool NotifyStoreExcl_Locked(uintptr_t addr, bool is_requesting_processor);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ Processor* next_;
+ Processor* prev_;
+ // A stxr can fail due to background cache evictions. Rather than
+ // simulating this, we'll just occasionally introduce cases where an
+ // exclusive store fails. This will happen once after every
+ // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadExcl_Locked(uintptr_t addr, Processor* processor);
+ void NotifyStore_Locked(uintptr_t addr, Processor* processor);
+ bool NotifyStoreExcl_Locked(uintptr_t addr, Processor* processor);
+
+ // Called when the simulator is destroyed.
+ void RemoveProcessor(Processor* processor);
+
+ private:
+ bool IsProcessorInLinkedList_Locked(Processor* processor) const;
+ void PrependProcessor_Locked(Processor* processor);
+
+ Processor* head_;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::Processor global_monitor_processor_;
+ static base::LazyInstance<GlobalMonitor>::type global_monitor_;
+
private:
void Init(FILE* stream);