Diffstat (limited to 'deps/v8/src/mips/macro-assembler-mips.cc')
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 1172
1 file changed, 600 insertions(+), 572 deletions(-)
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 84cf23c832..35953e509a 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -38,6 +38,14 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
}
}
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm() == zero_reg;
+ } else {
+ return rt.immediate() == 0;
+ }
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -821,6 +829,70 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
+void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ slt(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ sltu(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+ Slt(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+ Sltu(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ slt(rd, scratch, rs);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ sltu(rd, scratch, rs);
+ }
+}
+
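The four helpers above synthesize the missing comparisons from the hardware's single slt/sltu primitive: greater-than swaps the operands, and the -or-equal forms invert the swapped result with xori. A minimal host-side sketch of those identities (an illustration, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // slt: the only comparison MIPS provides directly.
    int32_t slt(int32_t a, int32_t b) { return a < b ? 1 : 0; }

    int main() {
      for (int32_t a = -2; a <= 2; ++a) {
        for (int32_t b = -2; b <= 2; ++b) {
          assert((a > b)  == slt(b, a));        // Sgt: swap operands
          assert((a <= b) == (slt(b, a) ^ 1));  // Sle: swap, then invert
          assert((a >= b) == (slt(a, b) ^ 1));  // Sge: invert
        }
      }
    }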
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
@@ -901,37 +973,38 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+ Register input = src;
if (operand_size == 2) {
- Seh(src, src);
+ input = dest;
+ Seh(dest, src);
} else if (operand_size == 1) {
- Seb(src, src);
+ input = dest;
+ Seb(dest, src);
}
// No need to do any preparation if operand_size is 4
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- wsbh(dest, src);
+ wsbh(dest, input);
rotr(dest, dest, 16);
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
- Register tmp = t0;
- Register tmp2 = t1;
+ Register tmp = at;
+ Register tmp2 = t8;
+ DCHECK(dest != tmp && dest != tmp2);
+ DCHECK(src != tmp && src != tmp2);
- andi(tmp2, src, 0xFF);
- sll(tmp2, tmp2, 24);
- or_(tmp, zero_reg, tmp2);
+ andi(tmp2, input, 0xFF);
+ sll(tmp, tmp2, 24);
- andi(tmp2, src, 0xFF00);
+ andi(tmp2, input, 0xFF00);
sll(tmp2, tmp2, 8);
or_(tmp, tmp, tmp2);
- srl(src, src, 8);
- andi(tmp2, src, 0xFF00);
+ srl(tmp2, input, 8);
+ andi(tmp2, tmp2, 0xFF00);
or_(tmp, tmp, tmp2);
- srl(src, src, 16);
- andi(tmp2, src, 0xFF);
- or_(tmp, tmp, tmp2);
-
- or_(dest, tmp, zero_reg);
+ srl(tmp2, input, 24);
+ or_(dest, tmp, tmp2);
}
}
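On cores without wsbh (the r1/Loongson path above), the swapped word is assembled from four mask-and-shift steps. The same sequence in plain C++, as a sketch of what the emitted code computes:

    #include <cstdint>

    uint32_t ByteSwap32(uint32_t x) {
      uint32_t r = (x & 0xFF) << 24;  // byte 0 -> byte 3
      r |= (x & 0xFF00) << 8;         // byte 1 -> byte 2
      r |= (x >> 8) & 0xFF00;         // byte 2 -> byte 1
      r |= x >> 24;                   // byte 3 -> byte 0
      return r;
    }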
@@ -940,25 +1013,28 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
DCHECK(operand_size == 1 || operand_size == 2);
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ Register input = src;
if (operand_size == 1) {
- andi(src, src, 0xFF);
+ input = dest;
+ andi(dest, src, 0xFF);
} else {
- andi(src, src, 0xFFFF);
+ input = dest;
+ andi(dest, src, 0xFFFF);
}
// No need to do any preparation if operand_size is 4
- wsbh(dest, src);
+ wsbh(dest, input);
rotr(dest, dest, 16);
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
if (operand_size == 1) {
- sll(src, src, 24);
+ sll(dest, src, 24);
} else {
- Register tmp = t0;
+ Register tmp = at;
andi(tmp, src, 0xFF00);
- sll(src, src, 24);
+ sll(dest, src, 24);
sll(tmp, tmp, 8);
- or_(dest, tmp, src);
+ or_(dest, tmp, dest);
}
}
}
@@ -1163,47 +1239,54 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
- MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
- lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
- if (IsFp32Mode()) { // fp32 mode.
- FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
- lwc1(nextfpreg,
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
+ if (IsFp32Mode()) { // fp32 mode.
+ FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
+ lwc1(nextfpreg,
+ MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(src.rm() != scratch);
+ lw(scratch,
MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
- } else {
- DCHECK(IsFp64Mode() || IsFpxxMode());
- // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- DCHECK(src.rm() != scratch);
- lw(scratch, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
- Mthc1(scratch, fd);
+ Mthc1(scratch, fd);
+ }
}
+ CheckTrampolinePoolQuick(1);
}
void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
- MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
- swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
- if (IsFp32Mode()) { // fp32 mode.
- FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
- swc1(nextfpreg,
- MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
- } else {
- DCHECK(IsFp64Mode() || IsFpxxMode());
- // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- DCHECK(src.rm() != t8);
- Mfhc1(t8, fd);
- sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
+ if (IsFp32Mode()) { // fp32 mode.
+ FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
+ swc1(nextfpreg,
+ MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK(src.rm() != t8);
+ Mfhc1(t8, fd);
+ sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ }
}
+ CheckTrampolinePoolQuick(1);
}
void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
@@ -1360,33 +1443,33 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high,
void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register shift) {
+ Register shift, Register scratch1,
+ Register scratch2) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label done;
- Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- And(shift, shift, 0x3F);
- sllv(dst_low, src_low, shift);
- Nor(kScratchReg2, zero_reg, shift);
- srl(kScratchReg, src_low, 1);
- srlv(kScratchReg, kScratchReg, kScratchReg2);
- sllv(dst_high, src_high, shift);
- Or(dst_high, dst_high, kScratchReg);
- And(kScratchReg, shift, 32);
+ Register scratch3 = t8;
+ And(scratch3, shift, 0x3F);
+ sllv(dst_low, src_low, scratch3);
+ Nor(scratch2, zero_reg, scratch3);
+ srl(scratch1, src_low, 1);
+ srlv(scratch1, scratch1, scratch2);
+ sllv(dst_high, src_high, scratch3);
+ Or(dst_high, dst_high, scratch1);
+ And(scratch1, scratch3, 32);
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
- Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ Branch(&done, eq, scratch1, Operand(zero_reg));
mov(dst_high, dst_low);
mov(dst_low, zero_reg);
} else {
- movn(dst_high, dst_low, kScratchReg);
- movn(dst_low, zero_reg, kScratchReg);
+ movn(dst_high, dst_low, scratch1);
+ movn(dst_low, zero_reg, scratch1);
}
bind(&done);
}
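The variable shift above follows the standard two-word recipe: shift both halves, OR the bits that cross the word boundary into the high half, then swap halves when bit 5 of the shift amount is set. Because MIPS shifts honor only the low five bits, the crossing bits are computed as (lo >> 1) >> (31 - s) rather than lo >> (32 - s), which would misbehave for s == 0. A C++ sketch of the same logic (my paraphrase, not V8 code):

    #include <cstdint>

    void ShlPairSketch(uint32_t& lo, uint32_t& hi, uint32_t shift) {
      shift &= 0x3F;
      uint32_t s = shift & 31;
      uint32_t carry = (lo >> 1) >> (31 - s);  // bits moving low -> high
      uint32_t new_lo = lo << s;
      uint32_t new_hi = (hi << s) | carry;
      if (shift & 32) {  // shifting by a whole word: swap halves
        hi = new_lo;
        lo = 0;
      } else {
        hi = new_hi;
        lo = new_lo;
      }
    }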
void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- uint32_t shift) {
- Register kScratchReg = s3;
+ uint32_t shift, Register scratch) {
shift = shift & 0x3F;
if (shift == 0) {
mov(dst_low, src_low);
@@ -1399,8 +1482,8 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
} else {
sll(dst_high, src_high, shift);
sll(dst_low, src_low, shift);
- srl(kScratchReg, src_low, 32 - shift);
- Or(dst_high, dst_high, kScratchReg);
+ srl(scratch, src_low, 32 - shift);
+ Or(dst_high, dst_high, scratch);
}
} else if (shift == 32) {
mov(dst_low, zero_reg);
@@ -1414,33 +1497,33 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register shift) {
+ Register shift, Register scratch1,
+ Register scratch2) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label done;
- Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- And(shift, shift, 0x3F);
- srlv(dst_high, src_high, shift);
- Nor(kScratchReg2, zero_reg, shift);
- sll(kScratchReg, src_high, 1);
- sllv(kScratchReg, kScratchReg, kScratchReg2);
- srlv(dst_low, src_low, shift);
- Or(dst_low, dst_low, kScratchReg);
- And(kScratchReg, shift, 32);
+ Register scratch3 = t8;
+ And(scratch3, shift, 0x3F);
+ srlv(dst_high, src_high, scratch3);
+ Nor(scratch2, zero_reg, scratch3);
+ sll(scratch1, src_high, 1);
+ sllv(scratch1, scratch1, scratch2);
+ srlv(dst_low, src_low, scratch3);
+ Or(dst_low, dst_low, scratch1);
+ And(scratch1, scratch3, 32);
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
- Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ Branch(&done, eq, scratch1, Operand(zero_reg));
mov(dst_low, dst_high);
mov(dst_high, zero_reg);
} else {
- movn(dst_low, dst_high, kScratchReg);
- movn(dst_high, zero_reg, kScratchReg);
+ movn(dst_low, dst_high, scratch1);
+ movn(dst_high, zero_reg, scratch1);
}
bind(&done);
}
void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- uint32_t shift) {
- Register kScratchReg = s3;
+ uint32_t shift, Register scratch) {
shift = shift & 0x3F;
if (shift == 0) {
mov(dst_low, src_low);
@@ -1454,8 +1537,8 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
srl(dst_high, src_high, shift);
srl(dst_low, src_low, shift);
shift = 32 - shift;
- sll(kScratchReg, src_high, shift);
- Or(dst_low, dst_low, kScratchReg);
+ sll(scratch, src_high, shift);
+ Or(dst_low, dst_low, scratch);
}
} else if (shift == 32) {
mov(dst_high, zero_reg);
@@ -1469,19 +1552,20 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
void TurboAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register shift) {
+ Register shift, Register scratch1,
+ Register scratch2) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label done;
- Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- And(shift, shift, 0x3F);
- srav(dst_high, src_high, shift);
- Nor(kScratchReg2, zero_reg, shift);
- sll(kScratchReg, src_high, 1);
- sllv(kScratchReg, kScratchReg, kScratchReg2);
- srlv(dst_low, src_low, shift);
- Or(dst_low, dst_low, kScratchReg);
- And(kScratchReg, shift, 32);
- Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ Register scratch3 = t8;
+ And(scratch3, shift, 0x3F);
+ srav(dst_high, src_high, scratch3);
+ Nor(scratch2, zero_reg, scratch3);
+ sll(scratch1, src_high, 1);
+ sllv(scratch1, scratch1, scratch2);
+ srlv(dst_low, src_low, scratch3);
+ Or(dst_low, dst_low, scratch1);
+ And(scratch1, scratch3, 32);
+ Branch(&done, eq, scratch1, Operand(zero_reg));
mov(dst_low, dst_high);
sra(dst_high, dst_high, 31);
bind(&done);
@@ -1489,8 +1573,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
void TurboAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- uint32_t shift) {
- Register kScratchReg = s3;
+ uint32_t shift, Register scratch) {
shift = shift & 0x3F;
if (shift == 0) {
mov(dst_low, src_low);
@@ -1504,8 +1587,8 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
sra(dst_high, src_high, shift);
srl(dst_low, src_low, shift);
shift = 32 - shift;
- sll(kScratchReg, src_high, shift);
- Or(dst_low, dst_low, kScratchReg);
+ sll(scratch, src_high, shift);
+ Or(dst_low, dst_low, scratch);
}
} else if (shift == 32) {
sra(dst_high, src_high, 31);
@@ -1620,7 +1703,8 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
- BranchF32(nullptr, &is_nan, eq, fs, fs);
+ CompareIsNanF32(fs, fs);
+ BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_s will return the same NaN value,
// while the sign has to be changed separately.
@@ -1644,7 +1728,8 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
- BranchF64(nullptr, &is_nan, eq, fs, fs);
+ CompareIsNanF64(fs, fs);
+ BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_d will return the same NaN value,
// while the sign has to be changed separately.
@@ -1765,7 +1850,8 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF64(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -1799,7 +1885,8 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF32(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -1818,6 +1905,125 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
bind(&done);
}
+template <typename RoundFunc>
+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+ FPURoundingMode mode, RoundFunc round) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ Register scratch2 = t9;
+ if (IsMipsArchVariant(kMips32r6)) {
+ cfc1(scratch, FCSR);
+ li(at, Operand(mode));
+ ctc1(at, FCSR);
+ rint_d(dst, src);
+ ctc1(scratch, FCSR);
+ } else {
+ Label done;
+ Mfhc1(scratch, src);
+ Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ Branch(USE_DELAY_SLOT, &done, hs, at,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
+ mov_d(dst, src);
+ round(this, dst, src);
+ Move(at, scratch2, dst);
+ or_(at, at, scratch2);
+ Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
+ cvt_d_l(dst, dst);
+ srl(at, scratch, 31);
+ sll(at, at, 31);
+ Mthc1(at, dst);
+ bind(&done);
+ }
+}
+
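The pre-r6 branch of the helper above guards the conversion with an exponent test: once the biased exponent reaches kExponentBias + kMantissaBits the double has no fractional bits (or is NaN/infinity) and is passed through unchanged, and a rounded result of zero gets the input's sign bit reattached. A host-side sketch of that control flow (an illustration, assuming a standard-library rounding callback):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double RoundDoubleSketch(double x, double (*round)(double)) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      int exponent = static_cast<int>((bits >> 52) & 0x7FF);
      if (exponent >= 1023 + 52) return x;  // already integral, or NaN/Inf
      double r = round(x);
      if (r == 0.0) return std::copysign(0.0, x);  // e.g. trunc(-0.25) == -0.0
      return r;
    }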
+void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_floor,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->floor_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_ceil,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->ceil_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_trunc,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->trunc_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_round,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->round_l_d(dst, src);
+ });
+}
+
+template <typename RoundFunc>
+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+ FPURoundingMode mode, RoundFunc round) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ if (IsMipsArchVariant(kMips32r6)) {
+ cfc1(scratch, FCSR);
+ li(at, Operand(mode));
+ ctc1(at, FCSR);
+ rint_s(dst, src);
+ ctc1(scratch, FCSR);
+ } else {
+ int32_t kFloat32ExponentBias = 127;
+ int32_t kFloat32MantissaBits = 23;
+ int32_t kFloat32ExponentBits = 8;
+ Label done;
+ mfc1(scratch, src);
+ Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
+ Branch(USE_DELAY_SLOT, &done, hs, at,
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits));
+ mov_s(dst, src);
+ round(this, dst, src);
+ mfc1(at, dst);
+ Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
+ cvt_s_w(dst, dst);
+ srl(at, scratch, 31);
+ sll(at, at, 31);
+ mtc1(at, dst);
+ bind(&done);
+ }
+}
+
+void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_floor,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->floor_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_ceil,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->ceil_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_trunc,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->trunc_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_round,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->round_w_s(dst, src);
+ });
+}
+
void TurboAssembler::Mthc1(Register rt, FPURegister fs) {
if (IsFp32Mode()) {
mtc1(rt, fs.high());
@@ -1882,205 +2088,68 @@ void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
}
-void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
- Label* nan, Condition cond, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd) {
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond == al) {
- Branch(bd, target);
- return;
- }
+void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
+ FPURegister cmp1, FPURegister cmp2) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ sizeField = sizeField == D ? L : W;
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
+ cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ } else {
+ c(cc, sizeField, cmp1, cmp2);
+ }
+}
- if (IsMipsArchVariant(kMips32r6)) {
- sizeField = sizeField == D ? L : W;
- }
- DCHECK(nan || target);
- // Check for unordered (NaN) cases.
- if (nan) {
- bool long_branch =
- nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
- if (!IsMipsArchVariant(kMips32r6)) {
- if (long_branch) {
- Label skip;
- c(UN, sizeField, cmp1, cmp2);
- bc1f(&skip);
- nop();
- BranchLong(nan, bd);
- bind(&skip);
- } else {
- c(UN, sizeField, cmp1, cmp2);
- bc1t(nan);
- if (bd == PROTECT) {
- nop();
- }
- }
- } else {
- // Use kDoubleCompareReg for comparison result. It has to be unavailable
- // to lithium register allocator.
- DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
- if (long_branch) {
- Label skip;
- cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(&skip, kDoubleCompareReg);
- nop();
- BranchLong(nan, bd);
- bind(&skip);
- } else {
- cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(nan, kDoubleCompareReg);
- if (bd == PROTECT) {
- nop();
- }
- }
- }
- }
+void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareF(sizeField, UN, cmp1, cmp2);
+}
- if (target) {
- bool long_branch =
- target->is_bound() ? !is_near(target) : is_trampoline_emitted();
- if (long_branch) {
- Label skip;
- Condition neg_cond = NegateFpuCondition(cond);
- BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- BranchLong(target, bd);
- bind(&skip);
- } else {
- BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
- }
- }
+void TurboAssembler::BranchTrueShortF(Label* target) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc1nez(target, kDoubleCompareReg);
+ nop();
+ } else {
+ bc1t(target);
+ nop();
}
}
-void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
- Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd) {
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- switch (cc) {
- case lt:
- c(OLT, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ult:
- c(ULT, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ugt:
- c(OLE, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case uge:
- c(OLT, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ule:
- c(ULE, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ne: // Unordered or not equal.
- c(EQ, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ogl:
- c(UEQ, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
- }
- }
+void TurboAssembler::BranchFalseShortF(Label* target) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc1eqz(target, kDoubleCompareReg);
+ nop();
} else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- // Use kDoubleCompareReg for comparison result, it is
- // valid in fp64 (FR = 1) mode which is implied for mips32r6.
- DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
- switch (cc) {
- case lt:
- cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ult:
- cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case gt:
- cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ugt:
- cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ge:
- cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case uge:
- cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case le:
- cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ule:
- cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case eq:
- cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ueq:
- cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ne:
- cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ogl:
- cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- default:
- CHECK(0);
- }
- }
- }
- if (bd == PROTECT) {
+ bc1f(target);
nop();
}
}
+void TurboAssembler::BranchTrueF(Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(&skip);
+ BranchLong(target, PROTECT);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(target);
+ }
+}
+
+void TurboAssembler::BranchFalseF(Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(&skip);
+ BranchLong(target, PROTECT);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(target);
+ }
+}
+
void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
@@ -2213,6 +2282,115 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) {
}
}
+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
+ const Operand& rt, Condition cond) {
+ switch (cond) {
+ case cc_always:
+ mov(rd, zero_reg);
+ break;
+ case eq:
+ if (rs == zero_reg) {
+ if (rt.is_reg()) {
+ LoadZeroIfConditionZero(rd, rt.rm());
+ } else {
+ if (rt.immediate() == 0) {
+ mov(rd, zero_reg);
+ } else {
+ nop();
+ }
+ }
+ } else if (IsZero(rt)) {
+ LoadZeroIfConditionZero(rd, rs);
+ } else {
+ Subu(t9, rs, rt);
+ LoadZeroIfConditionZero(rd, t9);
+ }
+ break;
+ case ne:
+ if (rs == zero_reg) {
+ if (rt.is_reg()) {
+ LoadZeroIfConditionNotZero(rd, rt.rm());
+ } else {
+ if (rt.immediate() != 0) {
+ mov(rd, zero_reg);
+ } else {
+ nop();
+ }
+ }
+ } else if (IsZero(rt)) {
+ LoadZeroIfConditionNotZero(rd, rs);
+ } else {
+ Subu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ break;
+ case greater_equal:
+ Sge(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs >= rt
+ break;
+ case less:
+ Slt(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs < rt
+ break;
+ case less_equal:
+ Sle(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs <= rt
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs > rt
+ break;
+
+ case Ugreater_equal:
+ Sgeu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs >= rt
+ break;
+ case Uless:
+ Sltu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs < rt
+ break;
+ case Uless_equal:
+ Sleu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs <= rt
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ seleqz(dest, dest, condition);
+ } else {
+ Movn(dest, zero_reg, condition);
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ selnez(dest, dest, condition);
+ } else {
+ Movz(dest, zero_reg, condition);
+ }
+}
+
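Both helpers pick the r6 conditional selects when available and fall back to Movn/Movz elsewhere. The select semantics reduce to simple ternaries, shown here for reference (an illustration of the instruction behavior, not code from the patch):

    #include <cstdint>

    // seleqz rd, rs, rt: keep rs when the condition is zero, else clear.
    uint32_t seleqz(uint32_t rs, uint32_t rt) { return rt == 0 ? rs : 0; }
    // selnez rd, rs, rt: keep rs when the condition is nonzero, else clear.
    uint32_t selnez(uint32_t rs, uint32_t rt) { return rt != 0 ? rs : 0; }

So seleqz(dest, dest, condition) zeroes dest exactly when condition is nonzero, which is what LoadZeroIfConditionNotZero needs.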
void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
@@ -2404,7 +2582,8 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, nullptr, eq, double_input, double_scratch);
+ CompareF64(EQ, double_input, double_scratch);
+ BranchTrueShortF(&done);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
@@ -2450,7 +2629,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
- DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t9;
@@ -2601,14 +2780,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
-static inline bool IsZero(const Operand& rt) {
- if (rt.is_reg()) {
- return rt.rm() == zero_reg;
- } else {
- return rt.immediate() == 0;
- }
-}
-
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@@ -4066,27 +4237,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- li(t0, Operand(debug_is_active));
- lb(t0, MemOperand(t0));
- Branch(&skip_hook, eq, t0, Operand(zero_reg));
-
+ Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
- Branch(&call_hook, ne, t0, Operand(zero_reg));
-
- lw(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(t0, &skip_hook);
- lw(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
- And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4245,254 +4402,90 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
-static inline void BranchOvfHelper(TurboAssembler* tasm, Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- tasm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+void TurboAssembler::AddOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- tasm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) tasm->Branch(no_overflow_label);
+ right_reg = right.rm();
}
-}
-void TurboAssembler::AddBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- if (right.is_reg()) {
- AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ addu(scratch, left, right_reg);
+ xor_(overflow, scratch, left);
+ xor_(at, scratch, right_reg);
+ and_(overflow, overflow, at);
+ mov(dst, scratch);
} else {
- if (IsMipsArchVariant(kMips32r6)) {
- Register right_reg = t9;
- DCHECK(left != right_reg);
- li(right_reg, Operand(right));
- AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- Addu(dst, left, right.immediate()); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- // Load right since xori takes uint16 as immediate.
- Addu(overflow_dst, zero_reg, right);
- xor_(overflow_dst, dst, overflow_dst);
- and_(overflow_dst, overflow_dst, scratch);
- } else {
- Addu(dst, left, right.immediate());
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(scratch, zero_reg, right);
- xor_(scratch, dst, scratch);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
+ addu(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(at, dst, right_reg);
+ and_(overflow, overflow, at);
}
}
-void TurboAssembler::AddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- if (IsMipsArchVariant(kMips32r6)) {
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- DCHECK(dst != scratch);
- Register left_reg = left == dst ? scratch : left;
- Register right_reg = right == dst ? t9 : right;
- DCHECK(dst != left_reg);
- DCHECK(dst != right_reg);
- Move(left_reg, left);
- Move(right_reg, right);
- addu(dst, left, right);
- Bnvc(left_reg, right_reg, no_overflow_label);
- } else {
- Bovc(left, right, overflow_label);
- addu(dst, left, right);
- if (no_overflow_label) bc(no_overflow_label);
- }
+void TurboAssembler::SubOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(right != overflow_dst);
- DCHECK(left != scratch);
- DCHECK(right != scratch);
-
- if (left == right && dst == left) {
- mov(overflow_dst, right);
- right = overflow_dst;
- }
-
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst == right) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
- } else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
-}
-
-void TurboAssembler::SubBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- if (right.is_reg()) {
- SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(left != scratch);
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- Subu(dst, left, right.immediate()); // Left is overwritten.
- // Load right since xori takes uint16 as immediate.
- Addu(overflow_dst, zero_reg, right);
- xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
- xor_(scratch, dst, scratch); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- Subu(dst, left, right);
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(scratch, zero_reg, right);
- xor_(scratch, left, scratch);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
-}
-
-void TurboAssembler::SubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(overflow_dst != left);
- DCHECK(overflow_dst != right);
- DCHECK(scratch != left);
- DCHECK(scratch != right);
-
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left == right) {
- mov(dst, zero_reg);
- if (no_overflow_label) {
- Branch(no_overflow_label);
- }
+ right_reg = right.rm();
}
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, right); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else if (dst == right) {
- mov(scratch, right); // Preserve right.
- subu(dst, left, right); // Right is overwritten.
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, scratch); // Original right.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- subu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
-}
-
-static inline void BranchOvfHelperMult(TurboAssembler* tasm,
- Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- tasm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
- } else {
- tasm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) tasm->Branch(no_overflow_label);
- }
-}
-
-void TurboAssembler::MulBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- if (right.is_reg()) {
- MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(left != scratch);
-
- Mul(overflow_dst, dst, left, right.immediate());
- sra(scratch, dst, 31);
- xor_(overflow_dst, overflow_dst, scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
- BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ if (dst == left || dst == right_reg) {
+ subu(scratch, left, right_reg);
+ xor_(overflow, left, scratch);
+ xor_(at, left, right_reg);
+ and_(overflow, overflow, at);
+ mov(dst, scratch);
+ } else {
+ subu(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(at, left, right_reg);
+ and_(overflow, overflow, at);
}
}
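The two new helpers replace the branching overflow checks with a flag left in a register, using the classic sign-bit tests: a + b overflows when both operands differ in sign from the sum, and a - b overflows when the operands differ in sign and the result's sign differs from a's. A self-contained sketch of the value whose bit 31 the code leaves in overflow (illustration only):

    #include <cstdint>

    // Bit 31 set <=> signed overflow, matching AddOverflow/SubOverflow.
    uint32_t AddOverflowBit(uint32_t a, uint32_t b) {
      uint32_t s = a + b;
      return (s ^ a) & (s ^ b);
    }

    uint32_t SubOverflowBit(uint32_t a, uint32_t b) {
      uint32_t d = a - b;
      return (a ^ d) & (a ^ b);
    }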
-void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(overflow_dst != left);
- DCHECK(overflow_dst != right);
- DCHECK(scratch != left);
- DCHECK(scratch != right);
+void TurboAssembler::MulOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ Register scratch2 = t9;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
+ } else {
+ right_reg = right.rm();
+ }
- if (IsMipsArchVariant(kMips32r6) && dst == right) {
- mov(scratch, right);
- Mul(overflow_dst, dst, left, scratch);
- sra(scratch, dst, 31);
- xor_(overflow_dst, overflow_dst, scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ Mul(overflow, scratch2, left, right_reg);
+ sra(scratch, scratch2, 31);
+ xor_(overflow, overflow, scratch);
+ mov(dst, scratch2);
} else {
- Mul(overflow_dst, dst, left, right);
+ Mul(overflow, dst, left, right_reg);
sra(scratch, dst, 31);
- xor_(overflow_dst, overflow_dst, scratch);
+ xor_(overflow, overflow, scratch);
}
-
- BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
}
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -4551,12 +4544,19 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
- li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ li(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<int32_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
+
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -4943,6 +4943,20 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
+ Operand(zero_reg));
+
+ lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
+ And(t8, t8, Operand(Map::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5013,15 +5027,18 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
}
// Check if one of operands is NaN.
- BranchF32(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
max_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF32(&return_right, nullptr, lt, src1, src2);
- BranchF32(&return_left, nullptr, lt, src2, src1);
+ CompareF32(OLT, src1, src2);
+ BranchTrueShortF(&return_right);
+ CompareF32(OLT, src2, src1);
+ BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
mfc1(t8, src1);
@@ -5056,15 +5073,18 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
}
// Check if one of operands is NaN.
- BranchF32(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
min_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF32(&return_left, nullptr, lt, src1, src2);
- BranchF32(&return_right, nullptr, lt, src2, src1);
+ CompareF32(OLT, src1, src2);
+ BranchTrueShortF(&return_left);
+ CompareF32(OLT, src2, src1);
+ BranchTrueShortF(&return_right);
// Left equals right => check for -0.
mfc1(t8, src1);
@@ -5099,15 +5119,18 @@ void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
}
// Check if one of operands is NaN.
- BranchF64(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
max_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF64(&return_right, nullptr, lt, src1, src2);
- BranchF64(&return_left, nullptr, lt, src2, src1);
+ CompareF64(OLT, src1, src2);
+ BranchTrueShortF(&return_right);
+ CompareF64(OLT, src2, src1);
+ BranchTrueShortF(&return_left);
// Left equals right => check for -0.
Mfhc1(t8, src1);
@@ -5143,15 +5166,18 @@ void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
}
// Check if one of operands is NaN.
- BranchF64(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
min_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF64(&return_left, nullptr, lt, src1, src2);
- BranchF64(&return_right, nullptr, lt, src2, src1);
+ CompareF64(OLT, src1, src2);
+ BranchTrueShortF(&return_left);
+ CompareF64(OLT, src2, src1);
+ BranchTrueShortF(&return_right);
// Left equals right => check for -0.
Mfhc1(t8, src1);
@@ -5382,7 +5408,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ li(kSpeculationPoisonRegister, -1);
+}
} // namespace internal
} // namespace v8