Diffstat (limited to 'deps/v8/src/codegen/arm64/assembler-arm64.cc')
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc  |  423
1 file changed, 121 insertions, 302 deletions
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 1806f82b46..159e763ba2 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -34,6 +34,7 @@
#include "src/base/cpu.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/execution/frame-constants.h"
@@ -283,11 +284,6 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
return true;
}
-void Immediate::InitializeHandle(Handle<HeapObject> handle) {
- value_ = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
-}
-
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
@@ -298,167 +294,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return !RelocInfo::IsNone(rmode);
}
-bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
- int offset) {
- auto existing = entry_map.find(data);
- if (existing == entry_map.end()) {
- entry_map[data] = static_cast<int>(entries_.size());
- entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
- return true;
- }
- int index = existing->second;
- entries_[index].second.push_back(offset);
- return false;
-}
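The deleted AddSharedEntry is a small value-to-slot deduplication scheme: the first use of a constant allocates a pool slot and reports that relocation info still has to be written, while later uses only record another patch site. A self-contained sketch of that pattern, using hypothetical names rather than the real ConstPool members:

#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

// Hypothetical stand-in for the pool bookkeeping, not the V8 type.
struct SharedPoolSketch {
  // Each entry: the 64-bit constant plus every pc offset that loads it.
  std::vector<std::pair<uint64_t, std::vector<int>>> entries;
  std::unordered_map<uint64_t, int> index_of;

  // Returns true on first use, i.e. the caller still needs to emit
  // relocation info for this constant.
  bool AddShared(uint64_t data, int pc_offset) {
    auto existing = index_of.find(data);
    if (existing == index_of.end()) {
      index_of[data] = static_cast<int>(entries.size());
      entries.push_back({data, {pc_offset}});
      return true;
    }
    entries[existing->second].second.push_back(pc_offset);
    return false;
  }
};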
-
-// Constant Pool.
-bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
- DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
- mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
- mode != RelocInfo::DEOPT_INLINING_ID &&
- mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
-
- bool write_reloc_info = true;
-
- uint64_t raw_data = static_cast<uint64_t>(data);
- int offset = assm_->pc_offset();
- if (IsEmpty()) {
- first_use_ = offset;
- }
-
- if (RelocInfo::IsShareableRelocMode(mode)) {
- write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
- } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
- // A zero data value is a placeholder and must not be shared.
- write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
- } else {
- entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
- }
-
- if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
- // Request constant pool emission after the next instruction.
- assm_->SetNextConstPoolCheckIn(1);
- }
-
- return write_reloc_info;
-}
-
-int ConstPool::DistanceToFirstUse() {
- DCHECK_GE(first_use_, 0);
- return assm_->pc_offset() - first_use_;
-}
-
-int ConstPool::MaxPcOffset() {
- // There are no pending entries in the pool so we can never get out of
- // range.
- if (IsEmpty()) return kMaxInt;
-
- // Entries are not necessarily emitted in the order they are added so in the
- // worst case the first constant pool use will be accessing the last entry.
- return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
-}
-
-int ConstPool::WorstCaseSize() {
- if (IsEmpty()) return 0;
-
- // Max size prologue:
- // b over
- // ldr xzr, #pool_size
- // blr xzr
- // nop
- // All entries are 64-bit for now.
- return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
-}
-
-int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
- if (IsEmpty()) return 0;
-
- // Prologue is:
- // b over ;; if require_jump
- // ldr xzr, #pool_size
- // blr xzr
- // nop ;; if not 64-bit aligned
- int prologue_size = require_jump ? kInstrSize : 0;
- prologue_size += 2 * kInstrSize;
- prologue_size +=
- IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
-
- // All entries are 64-bit for now.
- return prologue_size + EntryCount() * kSystemPointerSize;
-}
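The two deleted size helpers boil down to simple arithmetic: an optional branch over the pool, a two-instruction marker/guard, an optional alignment nop, and eight bytes per entry. A small sketch of that calculation, assuming arm64's 4-byte instruction size (the constant name below is local to the sketch):

// Mirrors the deleted SizeIfEmittedAtCurrentPc: branch (optional) + marker
// ldr + guard blr + nop if the entries would not be 8-byte aligned, then
// one 64-bit slot per entry.
constexpr int kSketchInstrSize = 4;

int PoolSizeAtPc(int pc_offset, int entry_count, bool require_jump) {
  int prologue = (require_jump ? kSketchInstrSize : 0) + 2 * kSketchInstrSize;
  if ((pc_offset + prologue) % 8 != 0) prologue += kSketchInstrSize;
  return prologue + entry_count * 8;
}
// Example: at an 8-byte aligned pc, with a jump and 3 entries:
// 12 bytes of branch/marker/guard + 4 bytes of alignment nop + 24 bytes of
// data = 40 bytes.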
-
-void ConstPool::Emit(bool require_jump) {
- DCHECK(!assm_->is_const_pool_blocked());
- // Prevent recursive pool emission and protect from veneer pools.
- Assembler::BlockPoolsScope block_pools(assm_);
-
- int size = SizeIfEmittedAtCurrentPc(require_jump);
- Label size_check;
- assm_->bind(&size_check);
-
- assm_->RecordConstPool(size);
- // Emit the constant pool. It is preceded by an optional branch if
- // require_jump and a header which will:
- // 1) Encode the size of the constant pool, for use by the disassembler.
- // 2) Terminate the program, to try to prevent execution from accidentally
- // flowing into the constant pool.
- // 3) align the pool entries to 64-bit.
- // The header is therefore made of up to three arm64 instructions:
- // ldr xzr, #<size of the constant pool in 32-bit words>
- // blr xzr
- // nop
- //
- // If executed, the header will likely segfault and lr will point to the
- // instruction following the offending blr.
- // TODO(all): Make the alignment part less fragile. Currently code is
- // allocated as a byte array so there are no guarantees the alignment will
- // be preserved on compaction. Currently it works as allocation seems to be
- // 64-bit aligned.
-
- // Emit branch if required
- Label after_pool;
- if (require_jump) {
- assm_->b(&after_pool);
- }
-
- // Emit the header.
- assm_->RecordComment("[ Constant Pool");
- EmitMarker();
- EmitGuard();
- assm_->Align(8);
-
- // Emit constant pool entries.
- // TODO(all): currently each relocated constant is 64 bits, consider adding
- // support for 32-bit entries.
- EmitEntries();
- assm_->RecordComment("]");
-
- if (after_pool.is_linked()) {
- assm_->bind(&after_pool);
- }
-
- DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
- static_cast<unsigned>(size));
-}
-
-void ConstPool::Clear() {
- shared_entries_.clear();
- handle_to_index_map_.clear();
- entries_.clear();
- first_use_ = -1;
-}
-
-void ConstPool::EmitMarker() {
- // A constant pool size is expressed in number of 32-bit words.
- // Currently all entries are 64-bit.
- // + 1 is for the crash guard.
- // + 0/1 for alignment.
- int word_count =
- EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
- assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
- Assembler::Rt(xzr));
-}
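The marker that the removed EmitMarker produced is an ldr into xzr whose literal immediate does not address real data; it carries the pool size in 32-bit words so the disassembler can skip over the constants. A tiny sketch of that count, for illustration only:

// Word count encoded by the removed EmitMarker: two 32-bit words per
// 64-bit entry, one word for the blr guard, plus one if an alignment nop
// is needed.
int MarkerWordCount(int entry_count, bool pc_is_8_byte_aligned) {
  return entry_count * 2 + 1 + (pc_is_8_byte_aligned ? 0 : 1);
}
// For 5 entries at an unaligned pc: 5 * 2 + 1 + 1 = 12 words (48 bytes).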
-
MemOperand::PairResult MemOperand::AreConsistentForPair(
const MemOperand& operandA, const MemOperand& operandB,
int access_size_log2) {
@@ -484,47 +319,18 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
return kNotPair;
}
-void ConstPool::EmitGuard() {
-#ifdef DEBUG
- Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
- DCHECK(instr->preceding()->IsLdrLiteralX() &&
- instr->preceding()->Rt() == xzr.code());
-#endif
- assm_->EmitPoolGuard();
-}
-
-void ConstPool::EmitEntries() {
- DCHECK(IsAligned(assm_->pc_offset(), 8));
-
- // Emit entries.
- for (const auto& entry : entries_) {
- for (const auto& pc : entry.second) {
- Instruction* instr = assm_->InstructionAt(pc);
-
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
- }
-
- assm_->dc64(entry.first);
- }
- Clear();
-}
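EmitEntries back-patches every recorded ldr-literal so its immediate points at the 64-bit slot being emitted. The real patching goes through Instruction::SetImmPCOffsetTarget; the conceptual sketch below only illustrates the encoding constraints of an arm64 literal load:

#include <cassert>
#include <cstdint>

// An arm64 literal load encodes a signed 19-bit offset in 4-byte units,
// relative to the address of the load itself.
int32_t LiteralImm19(intptr_t load_address, intptr_t entry_address) {
  intptr_t delta = entry_address - load_address;
  assert(delta % 4 == 0);                            // instruction-aligned
  intptr_t words = delta / 4;
  assert(words >= -(1 << 18) && words < (1 << 18));  // fits in imm19
  return static_cast<int32_t>(words);
}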
-
// Assembler
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- constpool_(this),
- unresolved_branches_() {
- const_pool_blocked_nesting_ = 0;
+ unresolved_branches_(),
+ constpool_(this) {
veneer_pool_blocked_nesting_ = 0;
Reset();
}
Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
@@ -533,7 +339,6 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
void Assembler::Reset() {
#ifdef DEBUG
DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_start_, 0, pc_ - buffer_start_);
@@ -541,9 +346,7 @@ void Assembler::Reset() {
pc_ = buffer_start_;
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
constpool_.Clear();
- next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
- no_const_pool_before_ = 0;
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
@@ -554,14 +357,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
request.heap_number(), AllocationType::kOld);
- set_target_address_at(pc, 0 /* unused */, object.address());
+ EmbeddedObjectIndex index = AddEmbeddedObject(object);
+ set_embedded_object_index_referenced_from(pc, index);
break;
}
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
- set_target_address_at(pc, 0 /* unused */,
- str->AllocateStringConstant(isolate).address());
+ EmbeddedObjectIndex index =
+ AddEmbeddedObject(str->AllocateStringConstant(isolate));
+ set_embedded_object_index_referenced_from(pc, index);
break;
}
}
@@ -572,7 +377,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// Emit constant pool if necessary.
- CheckConstPool(true, false);
+ ForceConstantPoolEmissionWithoutJump();
DCHECK(constpool_.IsEmpty());
int code_comments_size = WriteCodeComments();
@@ -870,32 +675,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
}
}
-void Assembler::StartBlockConstPool() {
- if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_constant_pool_check_ = kMaxInt;
- }
-}
-
-void Assembler::EndBlockConstPool() {
- if (--const_pool_blocked_nesting_ == 0) {
- // Check the constant pool hasn't been blocked for too long.
- DCHECK(pc_offset() < constpool_.MaxPcOffset());
- // Two cases:
- // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
- // will trigger a check.
- next_constant_pool_check_ = no_const_pool_before_;
- }
-}
-
-bool Assembler::is_const_pool_blocked() const {
- return (const_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_const_pool_before_);
-}
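The removed Start/EndBlockConstPool pair is a nesting counter: pool checks are pushed out while any caller holds the block and restored when the outermost block ends. In the new code this job is handled by BlockPoolsScope and the ConstantPool's own blocking state; here is a minimal RAII sketch of the pattern, with hypothetical types:

// Hypothetical nesting-counter guard; PoolStateSketch is not a V8 type.
struct PoolStateSketch {
  int blocked_nesting = 0;
  bool IsBlocked() const { return blocked_nesting > 0; }
};

class BlockScopeSketch {
 public:
  explicit BlockScopeSketch(PoolStateSketch* pool) : pool_(pool) {
    ++pool_->blocked_nesting;  // first scope starts blocking pool checks
  }
  ~BlockScopeSketch() { --pool_->blocked_nesting; }

 private:
  PoolStateSketch* pool_;
};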
-
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
@@ -1497,6 +1276,7 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
if (operand.IsHeapObjectRequest()) {
+ BlockPoolsScope no_pool_before_ldr_of_heap_object_request(this);
RequestHeapObject(operand.heap_object_request());
ldr(rt, operand.immediate_for_heap_object_request());
} else {
@@ -1505,11 +1285,8 @@ void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
}
void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
- // Currently we only support 64-bit literals.
- DCHECK(rt.Is64Bits());
-
+ BlockPoolsScope no_pool_before_ldr_pcrel_instr(this);
RecordRelocInfo(imm.rmode(), imm.value());
- BlockConstPoolFor(1);
// The load will be patched when the constpool is emitted; the patching
// code expects a load literal with offset 0.
ldr_pcrel(rt, 0);
@@ -3679,6 +3456,7 @@ void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
}
void Assembler::dcptr(Label* label) {
+ BlockPoolsScope no_pool_inbetween(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
if (label->is_bound()) {
// The label is bound, so it does not need to be updated and the internal
@@ -4471,8 +4249,10 @@ void Assembler::GrowBuffer() {
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
- *p += pc_delta;
+ Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
+ internal_ref += pc_delta;
+ WriteUnalignedValue<intptr_t>(address, internal_ref);
}
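This GrowBuffer change stops dereferencing a raw intptr_t* inside the code buffer and goes through V8's ReadUnalignedValue/WriteUnalignedValue instead, since an internal reference stored in a byte buffer is not guaranteed to be naturally aligned. A portable sketch of what such helpers typically look like (memcpy-based, which compilers lower to safe unaligned accesses):

#include <cstdint>
#include <cstring>

template <typename T>
T ReadUnalignedSketch(const void* address) {
  T value;
  std::memcpy(&value, address, sizeof(T));  // never assumes alignment
  return value;
}

template <typename T>
void WriteUnalignedSketch(void* address, T value) {
  std::memcpy(address, &value, sizeof(T));
}

// Relocation of one internal reference then reads, adjusts, and rewrites:
//   auto ref = ReadUnalignedSketch<intptr_t>(p);
//   WriteUnalignedSketch<intptr_t>(p, ref + pc_delta);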
// Pending relocation entries are also relative, no need to relocate.
@@ -4492,17 +4272,31 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
- bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- if (!new_constpool_entry) return;
+ if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(data));
+ data = AddEmbeddedObject(handle);
+ }
+ if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
+ if (constpool_.RecordEntry(static_cast<uint32_t>(data), rmode) ==
+ RelocInfoStatus::kMustOmitForDuplicate) {
+ return;
+ }
+ } else {
+ if (constpool_.RecordEntry(static_cast<uint64_t>(data), rmode) ==
+ RelocInfoStatus::kMustOmitForDuplicate) {
+ return;
+ }
+ }
}
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
if (!ShouldRecordRelocInfo(rmode)) return;
+ // Callers should ensure that constant pool emission is blocked until the
+ // instruction the reloc info is associated with has been emitted.
+ DCHECK(constpool_.IsBlocked());
+
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
@@ -4511,103 +4305,127 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
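With the new ConstantPool, RecordRelocInfo first converts an embedded-object handle into an index via AddEmbeddedObject, then records either a 32-bit entry (compressed embedded objects) or a 64-bit entry, and skips writing relocation info when the pool reports a duplicate. A sketch of that duplicate-reporting contract, with illustrative types rather than the real ConstantPoolKey:

#include <cstdint>
#include <set>

enum class RelocStatusSketch { kMustRecord, kMustOmitForDuplicate };

struct PoolKeySketch {
  uint64_t value;
  bool is_32bit;  // compressed embedded objects use 32-bit slots
  bool operator<(const PoolKeySketch& other) const {
    if (is_32bit != other.is_32bit) return is_32bit < other.is_32bit;
    return value < other.value;
  }
};

// A failed insert means the constant was already recorded, so the caller
// may omit its relocation info.
RelocStatusSketch RecordEntrySketch(std::set<PoolKeySketch>* keys,
                                    const PoolKeySketch& key) {
  return keys->insert(key).second ? RelocStatusSketch::kMustRecord
                                  : RelocStatusSketch::kMustOmitForDuplicate;
}

Note that the real pool also gates sharing on the relocation mode, which this sketch leaves out.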
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
+ BlockPoolsScope no_pool_before_b_instr(this);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
}
void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
+ BlockPoolsScope no_pool_before_bl_instr(this);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
bl(offset);
}
void Assembler::near_call(HeapObjectRequest request) {
+ BlockPoolsScope no_pool_before_bl_instr(this);
RequestHeapObject(request);
- int index = AddCodeTarget(Handle<Code>());
+ EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
- bl(index);
+ DCHECK(is_int32(index));
+ bl(static_cast<int>(index));
}
-void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstrSize;
- if (no_const_pool_before_ < pc_limit) {
- no_const_pool_before_ = pc_limit;
- // Make sure the pool won't be blocked for too long.
- DCHECK(pc_limit < constpool_.MaxPcOffset());
- }
+// Constant Pool
- if (next_constant_pool_check_ < no_const_pool_before_) {
- next_constant_pool_check_ = no_const_pool_before_;
- }
+void ConstantPool::EmitPrologue(Alignment require_alignment) {
+ // Recorded constant pool size is expressed in number of 32-bit words,
+ // and includes the prologue and alignment, but not the jump around the pool
+ // and the size of the marker itself.
+ const int marker_size = 1;
+ int word_count =
+ ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
+ assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
+ Assembler::Rt(xzr));
+ assm_->EmitPoolGuard();
}
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Some short sequences of instructions must not be broken up by constant
- // pool emission; such sequences are protected by calls to BlockConstPoolFor
- // and BlockConstPoolScope.
- if (is_const_pool_blocked()) {
- // Something is wrong if emission is forced and blocked at the same time.
- DCHECK(!force_emit);
- return;
- }
+int ConstantPool::PrologueSize(Jump require_jump) const {
+ // Prologue is:
+ // b over ;; if require_jump
+ // ldr xzr, #pool_size
+ // blr xzr
+ int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
+ prologue_size += 2 * kInstrSize;
+ return prologue_size;
+}
- // There is nothing to do if there are no pending constant pool entries.
- if (constpool_.IsEmpty()) {
- // Calculate the offset of the next check.
- SetNextConstPoolCheckIn(kCheckConstPoolInterval);
- return;
- }
+void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
+ Instruction* entry_offset,
+ const ConstantPoolKey& key) {
+ Instruction* instr = assm_->InstructionAt(load_offset);
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->options(), entry_offset);
+}
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kApproxMaxDistToConstPool or more.
- // * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
- int dist = constpool_.DistanceToFirstUse();
- int count = constpool_.EntryCount();
- if (!force_emit && (dist < kApproxMaxDistToConstPool) &&
- (count < kApproxMaxPoolEntryCount)) {
+void ConstantPool::Check(Emission force_emit, Jump require_jump,
+ size_t margin) {
+ // Some short sequences of instructions must not be broken up by constant
+ // pool emission; such sequences are protected by a ConstPool::BlockScope.
+ if (IsBlocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ DCHECK_EQ(force_emit, Emission::kIfNeeded);
return;
}
- // Emit veneers for branches that would go out of range during emission of the
- // constant pool.
- int worst_case_size = constpool_.WorstCaseSize();
- CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size);
+ // We emit a constant pool only if:
+ // * it is not empty
+ // * emission is forced by parameter force_emit (e.g. at function end).
+ // * emission is mandatory or opportune according to {ShouldEmitNow}.
+ if (!IsEmpty() && (force_emit == Emission::kForced ||
+ ShouldEmitNow(require_jump, margin))) {
+ // Emit veneers for branches that would go out of range during emission of
+ // the constant pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+ assm_->CheckVeneerPool(false, require_jump == Jump::kRequired,
+ assm_->kVeneerDistanceMargin + worst_case_size +
+ static_cast<int>(margin));
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the gap to the relocation information).
+ int needed_space = worst_case_size + assm_->kGap;
+ while (assm_->buffer_space() <= needed_space) {
+ assm_->GrowBuffer();
+ }
- // Check that the code buffer is large enough before emitting the constant
- // pool (this includes the gap to the relocation information).
- int needed_space = worst_case_size + kGap + 1 * kInstrSize;
- while (buffer_space() <= needed_space) {
- GrowBuffer();
+ EmitAndClear(require_jump);
}
-
- Label size_check;
- bind(&size_check);
- constpool_.Emit(require_jump);
- DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
- static_cast<unsigned>(worst_case_size));
-
- // Since a constant pool was just emitted, move the check offset forward by
+ // Since the constant pool is now empty, move the check offset forward by
// the standard interval.
- SetNextConstPoolCheckIn(kCheckConstPoolInterval);
+ SetNextCheckIn(ConstantPool::kCheckInterval);
}
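Condensed, Check() emits only when the pool is unblocked, non-empty, and either forced or due according to ShouldEmitNow; a forced request while blocked is a bug, hence the DCHECK. A small predicate sketch of that decision (the free-function form and parameter names are illustrative):

bool ShouldEmitPoolSketch(bool blocked, bool empty, bool forced,
                          bool should_emit_now) {
  if (blocked) return false;         // a BlockScope is active; forcing here is a bug
  if (empty) return false;           // nothing pending
  return forced || should_emit_now;  // mandatory or opportune
}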
-bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+// Pool entries are accessed with a pc-relative load, so the pool cannot be more
+// than 1 * MB away. Since constant pool emission checks are interval based, and
+// we want to keep entries close to the code, we try to emit every 64KB.
+const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
+const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
+const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
+const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
+const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
+
+const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
+const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
+const size_t ConstantPool::kApproxMaxEntryCount = 512;
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstrSize;
- return pc_offset() >
- max_reachable_pc - margin - protection_offset -
- static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+ return static_cast<intptr_t>(pc_offset() + margin + protection_offset +
+ unresolved_branches_.size() *
+ kMaxVeneerCodeSize) >= max_reachable_pc;
}
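The rewritten ShouldEmitVeneer turns the old subtraction into an addition: emit once the current pc plus the caller's margin, the two-instruction protection sequence, and the worst-case size of all pending veneers reaches the branch's limit. A sketch with the quantities spelled out (kMaxVeneerCodeSize is passed in here rather than taken from the assembler, and the example numbers are made up for the arithmetic):

#include <cstddef>

bool ShouldEmitVeneerSketch(int pc_offset, size_t margin,
                            size_t num_unresolved_branches,
                            size_t max_veneer_code_size,
                            int max_reachable_pc) {
  const size_t protection = 2 * 4;  // branch around the veneers + guard
  return static_cast<size_t>(pc_offset) + margin + protection +
             num_unresolved_branches * max_veneer_code_size >=
         static_cast<size_t>(max_reachable_pc);
}
// Example: pc 100000, margin 256, 40 pending branches at 16 bytes each:
// 100000 + 256 + 8 + 640 = 100904, so any branch reachable only up to
// 100904 triggers veneer emission now.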
void Assembler::RecordVeneerPool(int location_offset, int size) {
+ Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
reloc_info_writer.Write(&rinfo);
}
-void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
- BlockPoolsScope scope(this);
+void Assembler::EmitVeneers(bool force_emit, bool need_protection,
+ size_t margin) {
+ BlockPoolsScope scope(this, PoolEmissionCheck::kSkip);
RecordComment("[ Veneers");
// The exact size of the veneer pool must be recorded (see the comment at the
@@ -4677,7 +4495,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
- int margin) {
+ size_t margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
@@ -4713,6 +4531,7 @@ int Assembler::buffer_space() const {
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
+ Assembler::BlockPoolsScope block_pools(this);
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}