Diffstat (limited to 'deps/v8/src/mips/macro-assembler-mips.cc')
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc  822
1 file changed, 639 insertions, 183 deletions
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 6915922729..6f16f1d4c0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -8,12 +8,14 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -207,8 +209,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+ li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
}
}
@@ -282,8 +284,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+ li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -357,8 +359,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -395,8 +397,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Ret(eq, t8, Operand(zero_reg));
}
push(ra);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
@@ -641,7 +642,7 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, rt.rm());
mflo(rd);
} else {
@@ -651,7 +652,7 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
DCHECK(!rs.is(at));
li(at, rt);
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, at);
mflo(rd);
} else {
@@ -661,6 +662,71 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
+void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ muh(rd_hi, rs, rt.rm());
+ mul(rd_lo, rs, rt.rm());
+ } else {
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ mul(rd_lo, rs, rt.rm());
+ muh(rd_hi, rs, rt.rm());
+ }
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ muh(rd_hi, rs, at);
+ mul(rd_lo, rs, at);
+ } else {
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ mul(rd_lo, rs, at);
+ muh(rd_hi, rs, at);
+ }
+ }
+ }
+}
+
+
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ muh(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mfhi(rd);
+ } else {
+ muh(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
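
For reference, a minimal host-side C++ sketch of what the new three-register Mul and the Mulh helper compute: the full 64-bit signed product of two 32-bit operands, split into a low and a high word (mflo/mfhi on pre-r6, mul/muh on r6). The function names here are illustrative, not part of the V8 API.

#include <cstdint>

// Low and high words of the 64-bit signed product, i.e. Mul(rd_hi, rd_lo, rs, rt).
void FullMul(int32_t rs, int32_t rt, int32_t* rd_hi, int32_t* rd_lo) {
  int64_t product = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
  *rd_lo = static_cast<int32_t>(product);        // mflo / mul
  *rd_hi = static_cast<int32_t>(product >> 32);  // mfhi / muh
}

// High word only, i.e. Mulh(rd, rs, rt).
int32_t MulHigh(int32_t rs, int32_t rt) {
  return static_cast<int32_t>(
      (static_cast<int64_t>(rs) * static_cast<int64_t>(rt)) >> 32);
}

The r6 path also orders muh before mul when rd_lo aliases rs, so the source register is still intact when the second instruction reads it.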
@@ -697,6 +763,99 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
}
+void MacroAssembler::Div(Register rem, Register res,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, rt.rm());
+ mod(rem, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, at);
+ mod(rem, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ } else {
+ div(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ } else {
+ div(res, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ mod(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mfhi(rd);
+ } else {
+ mod(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ modu(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, at);
+ mfhi(rd);
+ } else {
+ modu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
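
A hedged sketch of the quotient/remainder pair the new Div, Mod, Divu and Modu overloads are meant to produce, written as plain C++ with truncating division semantics (pre-r6 gets both values from one div via mflo/mfhi; r6 uses the separate div/mod/divu/modu instructions). Names are illustrative only.

#include <cstdint>

// Signed: Div(rem, res, rs, rt) -- quotient in res, remainder in rem.
void DivMod(int32_t rs, int32_t rt, int32_t* res, int32_t* rem) {
  *res = rs / rt;  // pre-r6: div + mflo;  r6: div
  *rem = rs % rt;  // pre-r6: div + mfhi;  r6: mod
}

// Unsigned: Divu(res, rs, rt) and Modu(rd, rs, rt).
void DivModU(uint32_t rs, uint32_t rt, uint32_t* res, uint32_t* rem) {
  *res = rs / rt;  // pre-r6: divu + mflo;  r6: divu
  *rem = rs % rt;  // pre-r6: divu + mfhi;  r6: modu
}

Division by zero (and INT_MIN / -1 in the signed case) is left to the callers, as on the hardware itself.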
@@ -709,6 +868,28 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
}
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, rt.rm());
+ mflo(res);
+ } else {
+ divu(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, at);
+ mflo(res);
+ } else {
+ divu(res, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
@@ -811,7 +992,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
@@ -837,7 +1018,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
lw(zero_reg, rs);
} else {
pref(hint, rs);
@@ -1033,7 +1214,7 @@ void MacroAssembler::Ext(Register rt,
DCHECK(pos < 32);
DCHECK(pos + size < 33);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
@@ -1057,7 +1238,7 @@ void MacroAssembler::Ins(Register rt,
DCHECK(pos + size <= 32);
DCHECK(size != 0);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ins_(rt, rs, pos, size);
} else {
DCHECK(!rt.is(t8) && !rs.is(t8));
@@ -1111,8 +1292,8 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd,
// Load 2^31 into f20 as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Add it to fd.
add_d(fd, fd, scratch);
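
A small standalone check of the constant used above, assuming nothing beyond IEEE-754 doubles: 0x41E00000 in the high word (with a zero low word) is exactly 2^31, which is why the reordered sequence writes zero via mtc1 and then the high word via Mthc1.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t bits = 0x41E0000000000000ULL;  // high word 0x41E00000, low word 0
  double d;
  std::memcpy(&d, &bits, sizeof d);
  assert(d == 2147483648.0);  // 2^31
  return 0;
}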
@@ -1129,10 +1310,10 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
trunc_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
trunc_w_d(fd, fs);
}
@@ -1140,10 +1321,10 @@ void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
round_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
round_w_d(fd, fs);
}
@@ -1151,10 +1332,10 @@ void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
floor_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
floor_w_d(fd, fs);
}
@@ -1162,10 +1343,10 @@ void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
ceil_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
ceil_w_d(fd, fs);
}
@@ -1180,8 +1361,8 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
// Load 2^31 into scratch as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
@@ -1205,6 +1386,24 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mthc1(rt, fs);
+ } else {
+ mtc1(rt, fs.high());
+ }
+}
+
+
+void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mfhc1(rt, fs);
+ } else {
+ mfc1(rt, fs.high());
+ }
+}
+
+
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
@@ -1220,49 +1419,103 @@ void MacroAssembler::BranchF(Label* target,
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- }
-
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case lt:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- case nue:
- c(UEQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ } else {
+ // Use kDoubleCompareReg for the comparison result. It has to be unavailable
+ // to the Lithium register allocator.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(nan, kDoubleCompareReg);
+ }
+ }
+
+ if (!IsMipsArchVariant(kMips32r6)) {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ switch (cc) {
+ case lt:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case gt:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case ge:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case le:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
+ } else {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ // Unsigned conditions are treated as their signed counterpart.
+ // Use kDoubleCompareReg for the comparison result; it is
+ // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ switch (cc) {
+ case lt:
+ cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case gt:
+ cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case ge:
+ cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case le:
+ cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case eq:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ueq:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ne:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case nue:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ default:
+ CHECK(0);
+ }
}
}
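
For readability, a host-side sketch of the predicate each branch condition in BranchF reduces to ("unordered" means at least one operand is NaN); the comments map each case to the pre-r6 c/bc1t-bc1f pair and the r6 cmp/bc1nez-bc1eqz pair. The enum is a local stand-in for V8's Condition values, listed here only for illustration.

#include <cmath>

enum Cond { lt, gt, ge, le, eq, ne };  // illustrative subset

bool BranchTaken(Cond cc, double a, double b) {
  bool un = std::isnan(a) || std::isnan(b);
  switch (cc) {
    case lt: return !un && a < b;     // c(OLT)+bc1t  /  cmp(OLT)+bc1nez
    case gt: return !(un || a <= b);  // c(ULE)+bc1f  /  cmp(ULE)+bc1eqz
    case ge: return !(un || a < b);   // c(ULT)+bc1f  /  cmp(ULT)+bc1eqz
    case le: return !un && a <= b;    // c(OLE)+bc1t  /  cmp(OLE)+bc1nez
    case eq: return !un && a == b;    // c(EQ)+bc1t   /  cmp(EQ)+bc1nez
    case ne: return un || a != b;     // c(EQ)+bc1f   /  cmp(EQ)+bc1eqz
  }
  return false;
}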
@@ -1297,16 +1550,16 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
// register of FPU register pair.
if (hi != 0) {
li(at, Operand(hi));
- mtc1(at, dst.high());
+ Mthc1(at, dst);
} else {
- mtc1(zero_reg, dst.high());
+ Mthc1(zero_reg, dst);
}
}
}
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
@@ -1318,7 +1571,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
@@ -1330,7 +1583,7 @@ void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
@@ -1356,7 +1609,7 @@ void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
@@ -1382,7 +1635,7 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
void MacroAssembler::Clz(Register rd, Register rs) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
Register mask = t8;
Register scratch = t9;
@@ -1717,7 +1970,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Unsigned comparison.
case Ugreater:
if (r2.is(zero_reg)) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
@@ -1725,7 +1978,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Ugreater_equal:
if (r2.is(zero_reg)) {
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
@@ -1742,7 +1995,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Uless_equal:
if (r2.is(zero_reg)) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
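
The zero-operand rewrites above (and the matching ones in the later BranchShort overloads) correct the unsigned cases: bgtz/bgez are signed branches, but under unsigned comparison against zero "rs > 0" is simply "rs != 0", "rs >= 0" is always true, and "rs <= 0" is "rs == 0". A tiny sanity check, purely illustrative:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t rs = 0x80000000u;         // would look negative to a signed branch
  assert((rs > 0u) == (rs != 0u));   // Ugreater:        bne rs, zero_reg
  assert(rs >= 0u);                  // Ugreater_equal:  b (always taken)
  assert((rs <= 0u) == (rs == 0u));  // Uless_equal:     beq rs, zero_reg
  return 0;
}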
@@ -1824,7 +2077,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Unsigned comparison.
case Ugreater:
if (rt.imm32_ == 0) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
@@ -1834,7 +2087,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Ugreater_equal:
if (rt.imm32_ == 0) {
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
beq(scratch, zero_reg, offset);
@@ -1861,7 +2114,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Uless_equal:
if (rt.imm32_ == 0) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
@@ -1963,7 +2216,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
@@ -1973,7 +2226,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
@@ -1993,7 +2246,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Uless_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
@@ -2105,7 +2358,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
@@ -2244,7 +2497,7 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
li(r2, rt);
}
- {
+ if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
@@ -2308,7 +2561,88 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
default:
UNREACHABLE();
}
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
@@ -2339,7 +2673,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
li(r2, rt);
}
- {
+ if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
@@ -2414,7 +2748,100 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
default:
UNREACHABLE();
}
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
+
// Check that offset could actually hold on an int16_t.
DCHECK(is_int16(offset));
@@ -3136,12 +3563,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3149,7 +3574,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
@@ -3158,11 +3583,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3181,11 +3603,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -3193,11 +3614,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3217,24 +3635,21 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -4128,8 +4543,34 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addiu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(overflow_dst, dst, t9);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addiu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, dst, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
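+
+// (Sketch, host-side C++, not V8 API: the xor/and sequence above implements
+// the two's-complement rule that dst = left + right overflows iff left and
+// right share a sign and dst's sign differs from both.)
+//
+//   #include <cstdint>
+//   bool AddOverflows(int32_t left, int32_t right) {
+//     int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) +
+//                                        static_cast<uint32_t>(right));
+//     // (dst ^ left) & (dst ^ right) has its sign bit set exactly on
+//     // overflow, which is what ends up in overflow_dst.
+//     return ((dst ^ left) & (dst ^ right)) < 0;
+//   }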
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -4170,8 +4611,34 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addiu(dst, left, -(right.immediate())); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, scratch, t9); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ addiu(dst, left, -(right.immediate()));
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, left, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
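+
+// (Sketch, host-side C++, not V8 API: the subtraction variant uses the
+// complementary rule -- dst = left - right overflows iff left and right have
+// different signs and dst's sign differs from left's.)
+//
+//   #include <cstdint>
+//   bool SubOverflows(int32_t left, int32_t right) {
+//     int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) -
+//                                        static_cast<uint32_t>(right));
+//     // (left ^ right) & (dst ^ left) has its sign bit set exactly on
+//     // overflow.
+//     return ((left ^ right) & (dst ^ left)) < 0;
+//   }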
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -4602,7 +5069,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
DCHECK(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
int space = FPURegister::kMaxNumRegisters * kDoubleSize;
@@ -4620,7 +5087,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
DCHECK(stack_space >= 0);
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
@@ -4715,7 +5182,7 @@ void MacroAssembler::AssertStackIsAligned() {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
andi(at, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -4979,71 +5446,59 @@ void MacroAssembler::LookupNumberStringCache(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
And(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatAsciiStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
- andi(scratch2, second, kFlatAsciiStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+ DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
+ andi(scratch2, second, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatAsciiStringMask));
- Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+ And(scratch, type, Operand(kFlatOneByteStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}
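
As a sketch, the predicate these mask-and-compare sequences implement on an instance type, with the mask/tag constants written out as illustrative values (kStringEncodingMask == 4 and kOneByteStringTag == 4 are asserted elsewhere in this file; the other values mirror V8's instance-type encoding but are listed here only for the example):

#include <cstdint>

namespace {
const uint32_t kIsNotStringMask = 0x80;           // illustrative
const uint32_t kStringEncodingMask = 0x04;        // asserted in EnsureNotWhite
const uint32_t kStringRepresentationMask = 0x03;  // illustrative
const uint32_t kStringTag = 0x00;
const uint32_t kOneByteStringTag = 0x04;
const uint32_t kSeqStringTag = 0x00;
}  // namespace

bool IsSequentialOneByte(uint32_t instance_type) {
  const uint32_t mask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const uint32_t tag = kStringTag | kOneByteStringTag | kSeqStringTag;
  return (instance_type & mask) == tag;  // andi/And + Branch(ne, ...) above
}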
@@ -5116,7 +5571,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp.
mov(scratch, sp);
Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -5173,7 +5628,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
And(at, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -5446,8 +5901,8 @@ void MacroAssembler::EnsureNotWhite(
bind(&skip);
}
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@@ -5746,17 +6201,18 @@ void MacroAssembler::TruncatingDiv(Register result,
DCHECK(!dividend.is(result));
DCHECK(!dividend.is(at));
DCHECK(!result.is(at));
- MultiplierAndShift ms(divisor);
- li(at, Operand(ms.multiplier()));
- Mult(dividend, Operand(at));
- mfhi(result);
- if (divisor > 0 && ms.multiplier() < 0) {
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ li(at, Operand(mag.multiplier));
+ Mulh(result, dividend, Operand(at));
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
Addu(result, result, Operand(dividend));
}
- if (divisor < 0 && ms.multiplier() > 0) {
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
Subu(result, result, Operand(dividend));
}
- if (ms.shift() > 0) sra(result, result, ms.shift());
+ if (mag.shift > 0) sra(result, result, mag.shift);
srl(at, dividend, 31);
Addu(result, result, Operand(at));
}
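
To make the correction steps above concrete, a runnable reference for one divisor, using the classic multiplier/shift pair for 7 from Hacker's Delight as a stand-in for what base::SignedDivisionByConstant returns; everything here is illustrative rather than V8 API.

#include <cassert>
#include <cstdint>

int32_t TruncatingDivBy7(int32_t dividend) {
  const uint32_t multiplier = 0x92492493u;  // mag.multiplier for divisor == 7
  const int shift = 2;                      // mag.shift for divisor == 7
  // Mulh: high 32 bits of the 64-bit signed product (mult/mfhi or muh).
  int64_t product =
      static_cast<int64_t>(static_cast<int32_t>(multiplier)) * dividend;
  int32_t result = static_cast<int32_t>(product >> 32);
  bool neg = (multiplier & (1u << 31)) != 0;
  if (/* divisor > 0 && */ neg) result += dividend;  // Addu(result, dividend)
  if (shift > 0) result >>= shift;                   // sra(result, shift)
  result += static_cast<uint32_t>(dividend) >> 31;   // srl + Addu sign fix-up
  return result;
}

int main() {
  for (int32_t n = -1000; n <= 1000; ++n) assert(TruncatingDivBy7(n) == n / 7);
  return 0;
}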