Diffstat (limited to 'deps/v8/src/mips/assembler-mips.cc')
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc  517
1 file changed, 472 insertions(+), 45 deletions(-)
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 936a73b5f9..f1e5dfb670 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -37,6 +37,7 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/serialize.h"
@@ -98,10 +99,32 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+ // FP64 mode is implied on r6.
+ supported_ |= 1u << FP64FPU;
+#endif
+#if defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64FPU;
+#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(FPU_MODE_FPXX)
+ if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
+#elif defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64FPU;
+#endif
+#if defined(_MIPS_ARCH_MIPS32RX)
+ if (cpu.architecture() == 6) {
+ supported_ |= 1u << MIPSr6;
+ } else if (cpu.architecture() == 2) {
+ supported_ |= 1u << MIPSr1;
+ supported_ |= 1u << MIPSr2;
+ } else {
+ supported_ |= 1u << MIPSr1;
+ }
+#endif
#endif
}
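
For orientation, a hedged sketch of how the feature bits set above are consumed; CpuFeatures::IsSupported() is the existing query, and the branch bodies are illustrative only:

  // Minimal usage sketch, not part of the patch.
  if (CpuFeatures::IsSupported(FPU)) {
    // FPU instructions may be emitted.
  }
  if (CpuFeatures::IsSupported(FP64FPU)) {
    // The FPU is in 64-bit (FR=1) mode; mthc1/mfhc1 reach the high words.
  }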
@@ -317,7 +340,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -484,7 +507,9 @@ bool Assembler::IsBranch(Instr instr) {
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1EQZ) ||
+ (opcode == COP1 && rs_field == BC1NEZ);
}
@@ -529,12 +554,18 @@ bool Assembler::IsJal(Instr instr) {
bool Assembler::IsJr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ } else {
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
+ }
}
bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}
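
Note on the predicate split above: r6 drops the dedicated JR encoding, so a no-link register jump is assembled as JALR with rd == zero_reg, and the two predicates partition SPECIAL/JALR instructions by the rd field. Illustrative sketch (register choice is an example):

  //   assm.jr(t9);              // on r6, emitted as jalr(t9, zero_reg)
  //   IsJr(instr)   -> rd == 0  // no link register written
  //   IsJalr(instr) -> rd != 0  // link address written into rd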
@@ -1019,6 +1050,88 @@ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
+int32_t Assembler::branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(is_int16(offset >> 2));
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ DCHECK((offset & 3) == 0);
+ DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21 bits wide.
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21 bits wide.
+
+ return offset;
+}
+
+
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
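
Worked example for the two offset conventions above, with illustrative numbers: a label bound at pc_offset 0x100, a branch emitted at pc_offset 0xC0, and kBranchPCOffset == 4 give

  //   branch_offset21:         0x100 - (0xC0 + 4) = 0x3C   (delay-slot form)
  //   branch_offset21_compact: 0x100 -  0xC0      = 0x40   (compact form)
  // Both pass the DCHECKs: 4-byte aligned, and (offset >> 2) fits in 21 bits.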
@@ -1072,7 +1185,33 @@ void Assembler::bgez(Register rs, int16_t offset) {
}
+void Assembler::bgezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZL, rs, rt, offset);
+}
+
+
void Assembler::bgezal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
@@ -1087,6 +1226,13 @@ void Assembler::bgtz(Register rs, int16_t offset) {
}
+void Assembler::bgtzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
@@ -1094,6 +1240,38 @@ void Assembler::blez(Register rs, int16_t offset) {
}
+void Assembler::blezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
@@ -1102,6 +1280,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
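
The compact branches above reuse the branch-likely opcodes that r6 removed; the encodings are disambiguated purely by the register fields, which is what the zero_reg and rs == rt patterns in the emitters enforce. Illustrative decode table for the BLEZL opcode:

  //   rs == 0,  rt != 0        -> blezc rt
  //   rs == rt, rt != 0        -> bgezc rt
  //   rs != rt, both nonzero   -> bgec rs, rt
  // BGTZL follows the same pattern for bgtzc / bltzc / bltc.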
@@ -1116,6 +1295,101 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
}
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+void Assembler::bgezall(Register rs, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::beqzc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+ emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnezc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+ emit(instr);
+}
+
+
void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
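
Likewise, the overflow and equality compact branches above reuse the ADDI and DADDI opcodes, with the register-number ordering enforced by the DCHECKs acting as the discriminator. Illustrative decode for ADDI (DADDI is analogous, giving bnezalc / bnvc / bnec):

  //   rs == 0,  rt != 0        -> beqzalc rt
  //   rs >= rt, rs != 0        -> bovc rs, rt
  //   rs <  rt                 -> beqc rs, rt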
@@ -1129,12 +1403,16 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
+ if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+ } else {
+ jalr(rs, zero_reg);
}
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1205,7 +1483,41 @@ void Assembler::subu(Register rd, Register rs, Register rt) {
void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ } else {
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+ }
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muh(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
@@ -1224,11 +1536,23 @@ void Assembler::div(Register rs, Register rt) {
}
+void Assembler::div(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
+void Assembler::divu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
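
Usage sketch for the r6 multiply/divide forms added above (hedged; assumes an Assembler `assm` and conventional register names). These forms bypass the hi/lo registers, so the two halves of a product come from separate three-register instructions:

  // Pre-r6, via hi/lo:
  //   assm.mult(a0, a1);
  //   assm.mflo(v0);          // low 32 bits
  //   assm.mfhi(v1);          // high 32 bits
  // r6 forms added above:
  //   assm.mul(v0, a0, a1);   // low 32 bits
  //   assm.muh(v1, a0, a1);   // high 32 bits
  // Division is analogous: div(v0, a0, a1) gives the quotient,
  // mod(v1, a0, a1) the remainder (divu/modu for unsigned).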
@@ -1311,7 +1635,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1321,7 +1645,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1445,6 +1769,14 @@ void Assembler::lui(Register rd, int32_t j) {
}
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+ // This instruction uses the same opcode as 'lui'. The difference in
+ // encoding is that 'lui' has the zero register in the rs field.
+ DCHECK(is_uint16(j));
+ GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
// -------------Misc-instructions--------------
// Break / Trap instructions.
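
A note on aui above (hedged, following the r6 definition of AUI): since lui is the rs == zero_reg case of the same opcode, aui effectively computes rt = rs + (imm << 16). Illustrative use, in the (rs, rt, imm) operand order of the definition:

  //   assm.lui(t0, 0x1234);       // t0 = 0x1234 << 16
  //   assm.aui(t1, t0, 0x1234);   // t0 = t1 + (0x1234 << 16)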
@@ -1588,15 +1920,19 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+ }
}
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1604,13 +1940,13 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
void Assembler::pref(int32_t hint, const MemOperand& rs) {
- DCHECK(kArchVariant != kLoongson);
+ DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_);
@@ -1629,12 +1965,20 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for the non-8-byte alignment of HeapNumber: convert the 64-bit
// load into two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ GenInstrImmediate(LW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ mthc1(at, fd);
+ } else {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
@@ -1646,12 +1990,20 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for the non-8-byte alignment of HeapNumber: convert the 64-bit
// store into two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ mfhc1(at, fd);
+ GenInstrImmediate(SW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ } else {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
@@ -1660,11 +2012,21 @@ void Assembler::mtc1(Register rt, FPURegister fs) {
}
+void Assembler::mthc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
@@ -1785,25 +2147,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -1838,13 +2200,45 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
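
Usage sketch for the r6 FP min/max family above (hedged; S/D selects single or double precision, and the `a` variants compare absolute values). Operands follow the (fmt, fd, ft, fs) order of the definitions:

  //   assm.min(D, f0, f2, f4);    // double-precision minimum into f0
  //   assm.maxa(S, f6, f8, f10);  // single-precision maximum by magnitude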
@@ -1860,7 +2254,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -1870,7 +2264,32 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
}
-// Conditions.
+// Conditions for >= MIPSr6.
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ emit(instr);
+}
+
+
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
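
The compare-and-branch idiom enabled by cmp/bc1eqz/bc1nez above (hedged sketch): r6 removes the FCC condition bits, so the comparison result lands in an FPU register and the branch tests that register instead:

  //   assm.cmp(OLT, D, f8, f0, f2);  // f8 = all ones if f0 < f2, else 0
  //   assm.bc1nez(offset, f8);       // taken when the comparison held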
@@ -2184,7 +2603,7 @@ void Assembler::set_target_address_at(Address pc,
// lui rt, upper-16.
// ori rt rt, lower-16.
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
// The following code is an optimization for the common case of Call()
// or Jump() which is load to register, and jump through register:
@@ -2227,20 +2646,20 @@ void Assembler::set_target_address_at(Address pc,
if (IsJalr(instr3)) {
// Try to convert JALR to JAL.
if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
patched_jump = true;
}
} else if (IsJr(instr3)) {
// Try to convert JR to J, skip returns (jr ra).
bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
patched_jump = true;
}
} else if (IsJal(instr3)) {
if (in_range) {
// We are patching an already converted JAL.
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
} else {
// Patch JAL, but out of range, revert to JALR.
// JALR rs reg is the rt reg specified in the ORI instruction.
@@ -2252,12 +2671,16 @@ void Assembler::set_target_address_at(Address pc,
} else if (IsJ(instr3)) {
if (in_range) {
// We are patching an already converted J (jump).
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
} else {
// Trying patch J, but out of range, just go back to JR.
// JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
}
patched_jump = true;
}
@@ -2285,19 +2708,23 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
uint32_t rs_field = GetRt(instr2) << kRsShift;
uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+ *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
patched = true;
} else if (IsJ(instr3)) {
DCHECK(GetOpcodeField(instr1) == LUI);
DCHECK(GetOpcodeField(instr2) == ORI);
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
patched = true;
}
if (patched) {
- CpuFeatures::FlushICache(pc+2, sizeof(Address));
+ CpuFeatures::FlushICache(pc + 2, sizeof(Address));
}
}
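
Net effect of the two patching changes above (hedged): on r6, a register jump patched back from J/JAL is written as SPECIAL | rs_field | JALR with rd == zero_reg, which is exactly the shape the updated IsJr() predicate earlier in this file recognizes, so a re-patched site is still classified correctly.

  //   r6:      SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR
  //   pre-r6:  SPECIAL | rs_field | JR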