Diffstat (limited to 'deps/v8/src/codegen/mips')
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips-inl.h    |   5
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc       | 117
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.h        |  15
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 135
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h  |  10
5 files changed, 139 insertions(+), 143 deletions(-)
diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h
index d8181ad8f5..53e6f93411 100644
--- a/deps/v8/src/codegen/mips/assembler-mips-inl.h
+++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h
@@ -133,7 +133,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
if (Assembler::IsJicOrJialc(instr2)) {
// Encoded internal references are lui/jic loads of 32-bit absolute addresses.
uint32_t lui_offset_u, jic_offset_u;
- Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
@@ -183,7 +183,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
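Most hunks in this patch apply the same mechanical conversion: reference out-parameters become pointers, which is why the NOLINT(runtime/references) suppressions disappear from the headers below. A minimal sketch of the convention, using a hypothetical Unpack helper that only mirrors the shape of UnpackTargetAddressUnsigned:

```cpp
#include <cstdint>

// Hypothetical helper; pointer out-parameters make the writes explicit.
void Unpack(uint32_t address, uint32_t* hi, uint32_t* lo) {
  *hi = address >> 16;
  *lo = address & 0xFFFF;
}

int main() {
  uint32_t hi, lo;
  Unpack(0xDEADBEEF, &hi, &lo);  // '&' at the call site signals mutation
  return (hi == 0xDEAD && lo == 0xBEEF) ? 0 : 1;
}
```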
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 423da2fb65..768b16b86c 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -231,8 +231,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
@@ -742,27 +742,27 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
// Before that addition, the difference between the upper part of the target
// address and the upper part of the sign-extended offset (0xFFFF or 0x0000)
// is loaded into the jic register with a lui instruction.
-void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
- int16_t& jic_offset) {
- lui_offset = (address & kHiMask) >> kLuiShift;
- jic_offset = address & kLoMask;
+void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset) {
+ *lui_offset = (address & kHiMask) >> kLuiShift;
+ *jic_offset = address & kLoMask;
- if (jic_offset < 0) {
- lui_offset -= kImm16Mask;
+ if (*jic_offset < 0) {
+ *lui_offset -= kImm16Mask;
}
}
void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
- uint32_t& lui_offset,
- uint32_t& jic_offset) {
+ uint32_t* lui_offset,
+ uint32_t* jic_offset) {
int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
int16_t jic_offset16 = address & kLoMask;
if (jic_offset16 < 0) {
lui_offset16 -= kImm16Mask;
}
- lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
- jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
+ *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
+ *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
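The compensation described in the comment above can be checked in isolation: jic sign-extends its 16-bit immediate before adding it to the lui result, so a negative low half must be paid for by bumping the high half. That is what `*lui_offset -= kImm16Mask` does in 16-bit arithmetic, since subtracting 0xFFFF is the same as adding 1 modulo 2^16. A standalone sketch (hypothetical SplitAddress, not the V8 API):

```cpp
#include <cassert>
#include <cstdint>

// Split a 32-bit address so that (hi << 16) + SignExtend16(lo) == address.
void SplitAddress(uint32_t address, uint32_t* lui_imm, uint32_t* jic_imm) {
  int16_t hi = static_cast<int16_t>(address >> 16);
  int16_t lo = static_cast<int16_t>(address & 0xFFFF);
  if (lo < 0) hi += 1;  // Compensate for the sign extension jic applies.
  *lui_imm = static_cast<uint16_t>(hi);
  *jic_imm = static_cast<uint16_t>(lo);
}

int main() {
  uint32_t lui_imm, jic_imm;
  SplitAddress(0x1234FFF0u, &lui_imm, &jic_imm);
  // Reassemble the way the hardware does: lui, then sign-extended jic add.
  uint32_t rebuilt =
      (lui_imm << 16) + static_cast<uint32_t>(static_cast<int16_t>(jic_imm));
  assert(rebuilt == 0x1234FFF0u);
  return 0;
}
```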
@@ -977,7 +977,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
- UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
@@ -1928,7 +1928,7 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-void Assembler::AdjustBaseAndOffset(MemOperand& src,
+void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -1941,26 +1941,26 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
- // while loading 'offset'.
+ DCHECK(src->rm() != scratch); // Must not overwrite the register 'base'
+ // while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -1972,13 +1972,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- addiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addiu(at, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (IsMipsArchVariant(kMips32r6)) {
// On r6 take advantage of the aui instruction, e.g.:
// aui at, base, offset_high
@@ -1989,12 +1989,12 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// addiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
offset_high += (offset_low < 0)
? 1
: 0; // Account for offset sign extension in load/store.
- aui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (two_accesses && !is_int16(static_cast<int32_t>(
offset_low + second_access_add_to_offset))) {
// Avoid overflow in the 16-bit offset of the load/store instruction when
@@ -2002,7 +2002,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
addiu(scratch, scratch, kDoubleSize);
offset_low -= kDoubleSize;
}
- src.offset_ = offset_low;
+ src->offset_ = offset_low;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2013,62 +2013,62 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- addu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ addu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LB, source.rm(), rd, source.offset());
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}
void Assembler::lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LH, source.rm(), rd, source.offset());
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}
void Assembler::lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LW, source.rm(), rd, source.offset());
}
@@ -2088,19 +2088,19 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SB, source.rm(), rd, source.offset());
}
void Assembler::sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SH, source.rm(), rd, source.offset());
}
void Assembler::sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SW, source.rm(), rd, source.offset());
}
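The "simple adjustment" case above folds a chunk of the offset into the base register with a single addiu so that the residue fits the 16-bit immediate of the load/store, and it uses 0x7FF8 (the largest int16_t that is a multiple of 8) so the offset's doubleword alignment survives. A self-contained sketch of that arithmetic (hypothetical SimpleAdjust, not the V8 helper):

```cpp
#include <cassert>
#include <cstdint>

// Split a 32-bit offset into an addiu-able chunk plus an int16_t residue,
// preserving the offset's alignment modulo 8.
bool SimpleAdjust(int32_t offset, int32_t* base_add, int32_t* residue) {
  constexpr int32_t kChunk = 0x7FF8;  // Max int16_t multiple of 8.
  if (0 <= offset && offset <= 2 * kChunk) {
    *base_add = kChunk;          // Models: addiu scratch, base, kChunk.
    *residue = offset - kChunk;  // In [-kChunk, kChunk], so fits int16_t.
    return true;
  }
  return false;  // Needs one of the larger adjustment strategies.
}

int main() {
  int32_t add = 0, res = 0;
  assert(SimpleAdjust(0xFFF0, &add, &res));
  assert(add + res == 0xFFF0 && res >= INT16_MIN && res <= INT16_MAX);
  assert(res % 8 == 0xFFF0 % 8);  // Alignment preserved.
  return 0;
}
```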
@@ -2385,13 +2385,13 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}
@@ -2969,7 +2969,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
#define MSA_LD_ST(name, opcode) \
void Assembler::name(MSARegister wd, const MemOperand& rs) { \
MemOperand source = rs; \
- AdjustBaseAndOffset(source); \
+ AdjustBaseAndOffset(&source); \
if (is_int10(source.offset())) { \
GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
} else { \
@@ -3473,7 +3473,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
- Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ Assembler::UnpackTargetAddressUnsigned(imm,
+ &lui_offset_u, &jic_offset_u);
instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
@@ -3717,7 +3718,7 @@ void Assembler::set_target_value_at(Address pc, uint32_t target,
if (IsJicOrJialc(instr2)) {
// Must use 2 instructions to ensure patchable code => use lui and jic
uint32_t lui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 0359be2c94..d8cb8ec3f2 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -36,6 +36,7 @@
#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
+#include <memory>
#include <set>
@@ -1478,13 +1479,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
- static void UnpackTargetAddress(
- uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references)
- int16_t& jic_offset); // NOLINT(runtime/references)
- static void UnpackTargetAddressUnsigned(
- uint32_t address,
- uint32_t& lui_offset, // NOLINT(runtime/references)
- uint32_t& jic_offset); // NOLINT(runtime/references)
+ static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset);
+ static void UnpackTargetAddressUnsigned(uint32_t address,
+ uint32_t* lui_offset,
+ uint32_t* jic_offset);
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1515,7 +1514,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 2e4698a9e7..760d33d7c9 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -1063,7 +1063,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
@@ -1089,7 +1089,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
@@ -1105,7 +1105,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1140,7 +1140,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1177,7 +1177,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
if (scratch != rd) {
mov(scratch, rd);
@@ -1256,7 +1256,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
@@ -1284,7 +1284,7 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
@@ -1305,13 +1305,13 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lw(rd, source);
}
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand dest = rs;
- AdjustBaseAndOffset(dest);
+ AdjustBaseAndOffset(&dest);
sw(rd, dest);
}
@@ -2926,18 +2926,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
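After this change CalculateOffset reports reachability through its return value and writes the computed branch offset through a pointer, which is the `if (!CalculateOffset(L, &offset, ...)) return false;` pattern repeated across BranchShortHelperR6 below. A hypothetical standalone mirror of that contract (stub Label, OffsetSize, and a range check only; the real helper also resolves the label against the current pc):

```cpp
#include <cstdint>
#include <cstdio>

struct Label { int32_t pos; };  // Stub: bound label position.
enum class OffsetSize { kOffset16 = 16, kOffset21 = 21, kOffset26 = 26 };

// Returns false when the target is out of range for the offset width;
// otherwise writes the pc-relative offset through the pointer.
bool CalculateOffset(const Label* L, int32_t* offset, OffsetSize bits,
                     int32_t pc) {
  int64_t delta = int64_t{L->pos} - pc;
  int64_t limit = int64_t{1} << (static_cast<int>(bits) - 1);
  if (delta < -limit || delta >= limit) return false;
  *offset = static_cast<int32_t>(delta);
  return true;
}

int main() {
  Label target{400};
  int32_t offset = 0;
  if (CalculateOffset(&target, &offset, OffsetSize::kOffset16, /*pc=*/100)) {
    std::printf("short branch reachable, offset %d\n", offset);
  }
  return 0;
}
```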
@@ -2955,23 +2955,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
break;
case eq:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beq(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beqc(rs, scratch, offset);
}
@@ -2980,16 +2980,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bne(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bnec(rs, scratch, offset);
}
@@ -3001,14 +3001,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(scratch, rs, offset);
@@ -3017,17 +3017,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(rs, scratch, offset);
@@ -3038,14 +3038,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(rs, scratch, offset);
@@ -3054,17 +3054,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(scratch, rs, offset);
@@ -3077,14 +3077,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
@@ -3093,17 +3093,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Ugreater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
beqzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
@@ -3114,13 +3114,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
break; // No code needs to be emitted.
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
@@ -3129,17 +3129,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Uless_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
return false;
bc(offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
@@ -3418,7 +3418,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
break;
case eq:
@@ -3440,11 +3440,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3456,14 +3456,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3477,11 +3477,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3493,14 +3493,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3751,8 +3751,8 @@ void TurboAssembler::Jump(Register target, const Operand& offset,
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
!is_int16(offset.immediate())) {
uint32_t aui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
- jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset,
+ &jic_offset);
RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
aui(target, target, aui_offset);
if (cond == cc_always) {
@@ -3790,7 +3790,7 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
// This is not an issue, t9 is expected to be clobbered anyway.
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target);
}
@@ -3853,10 +3853,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::Jump(const ExternalReference& reference) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, reference);
- Jump(scratch);
+ li(t9, reference);
+ Jump(t9);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -3940,7 +3938,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
int32_t target_int = static_cast<int32_t>(target);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
+ UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target_int);
}
@@ -3990,7 +3988,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
Call(code.address(), rmode, cond, rs, rt, bd);
}
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index d9c372f868..e82c88f0b5 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -849,12 +849,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);