path: root/deps/v8/src/mips64/macro-assembler-mips64.cc
Diffstat (limited to 'deps/v8/src/mips64/macro-assembler-mips64.cc')
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc  2421
1 file changed, 1365 insertions(+), 1056 deletions(-)
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 3cad6ba82f..7b73ac74e4 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -17,12 +17,13 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
has_double_zero_reg_set_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
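
[Editor's note] The new CodeObjectRequired parameter lets callers that only emit raw instructions skip allocating the self-reference handle, and with it any heap access, in the constructor. A minimal usage sketch; the buffer, size, and call sites are hypothetical, only the enum values come from the patch:

    // Full code generation: code_object_ is initialized from the heap.
    MacroAssembler masm(isolate, nullptr, 256 * KB, CodeObjectRequired::kYes);
    // Raw instruction emission (e.g. patching): no heap access needed.
    MacroAssembler patcher(isolate, buffer, length, CodeObjectRequired::kNo);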
@@ -436,10 +437,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ld(scratch, FieldMemOperand(scratch, offset));
- ld(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1190,6 +1188,32 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (kArchVariant == kMips64r6 && sa <= 4) {
+ lsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ sll(tmp, rs, sa);
+ Addu(rd, rt, tmp);
+ }
+}
+
+
+void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (kArchVariant == kMips64r6 && sa <= 4) {
+ dlsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ dsll(tmp, rs, sa);
+ Daddu(rd, rt, tmp);
+ }
+}
+
+
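
[Editor's note] Both helpers compute rd = rt + (rs << sa). On r6 with sa <= 4 this folds into a single lsa/dlsa; otherwise the shift goes through a temporary (rd itself when it doesn't alias rt, else the caller-supplied scratch) followed by an add. A hedged sketch of the typical use, scaled array addressing; the register choices are illustrative:

    // t0 = a0 + (a1 << 3): address of the a1-th 64-bit element at base a0.
    // One dlsa on r6; dsll + daddu on earlier variants.
    Dlsa(t0, a0, a1, 3);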
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
@@ -1483,6 +1507,31 @@ void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
}
+void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(size <= 64);
+ dextm(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos >= 32 && pos < 64);
+ DCHECK(size < 33);
+ dextu(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(pos + size <= 32);
+ DCHECK(size != 0);
+ dins_(rt, rs, pos, size);
+}
+
+
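
[Editor's note] These wrappers surface the MIPS64 doubleword bit-field instructions, and the DCHECKs mirror the ISA encoding limits: dextm covers fields extending past bit 31, dextu covers fields starting at or above bit 32, and dins_ stays within the low word (pos + size <= 32). The extracts compute rt = (rs >> pos) & ((1 << size) - 1); the insert writes the low size bits of rs into rt at pos. Illustrative uses (registers hypothetical):

    Dextu(v0, a0, 35, 6);  // v0 = bits [40..35] of a0 (pos >= 32, so dextu)
    Dins(v0, a1, 4, 8);    // bits [11..4] of v0 = low 8 bits of a1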
void MacroAssembler::Ins(Register rt,
Register rs,
uint16_t pos,
@@ -1494,49 +1543,90 @@ void MacroAssembler::Ins(Register rt,
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
- FPURegister scratch) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
+ Cvt_d_uw(fd, t8);
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ // Zero extend int32 in rs.
+ Dext(t9, rs, 0, 32);
+ dmtc1(t9, fd);
+ cvt_d_l(fd, fd);
+}
+
+
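
[Editor's note] The rewrite drops the FPU scratch and the split/re-add dance of the old Cvt_d_uw: a zero-extended 32-bit value always fits in a signed 64-bit integer, so one cvt_d_l suffices. Scalar model (a sketch, not V8 code):

    uint32_t x = /* input */;
    // x < 2^32, so the signed 64-bit conversion is always in range and exact.
    double d = static_cast<double>(static_cast<int64_t>(x));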
+void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ dmfc1(t8, fs);
+ Cvt_d_ul(fd, t8);
+}
+
+
+void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
- DCHECK(!fd.is(scratch));
DCHECK(!rs.is(t9));
DCHECK(!rs.is(at));
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
- mthc1(zero_reg, fd);
+ Label msb_clear, conversion_done;
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
+ Branch(&msb_clear, ge, rs, Operand(zero_reg));
- Label conversion_done;
+ // Rs >= 2^63
+ andi(t9, rs, 1);
+ dsrl(rs, rs, 1);
+ or_(t9, t9, rs);
+ dmtc1(t9, fd);
+ cvt_d_l(fd, fd);
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_d(fd, fd, fd); // In delay slot.
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
+ bind(&msb_clear);
+  // Rs < 2^63, so a simple conversion suffices.
+ dmtc1(rs, fd);
+ cvt_d_l(fd, fd);
+
+ bind(&conversion_done);
+}
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(zero_reg, scratch);
- mthc1(at, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
+
+void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ dmfc1(t8, fs);
+ Cvt_s_ul(fd, t8);
+}
+
+
+void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ Label positive, conversion_done;
+
+ Branch(&positive, ge, rs, Operand(zero_reg));
+
+  // Rs >= 2^63.
+ andi(t9, rs, 1);
+ dsrl(rs, rs, 1);
+ or_(t9, t9, rs);
+ dmtc1(t9, fd);
+ cvt_s_l(fd, fd);
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_s(fd, fd, fd); // In delay slot.
+
+ bind(&positive);
+  // Rs < 2^63, so a simple conversion suffices.
+ dmtc1(rs, fd);
+ cvt_s_l(fd, fd);
bind(&conversion_done);
}
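
[Editor's note] cvt_d_l/cvt_s_l interpret the FPU register as a signed 64-bit integer, so an input with bit 63 set cannot be converted directly. The MSB-set path halves the value while ORing the dropped low bit back in, so that bit still participates in rounding as a sticky bit, converts, then doubles the result. Scalar model of that path (a sketch, not V8 code):

    uint64_t x = /* input with x >= 2^63 */;
    uint64_t half = (x >> 1) | (x & 1);  // halve; keep bit 0 as a sticky bit
    double d = static_cast<double>(static_cast<int64_t>(half));  // non-negative
    d = d + d;                           // scale back up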
@@ -1582,6 +1672,19 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
+ FPURegister scratch, Register result) {
+ Trunc_ul_d(fs, t8, scratch, result);
+ dmtc1(t8, fd);
+}
+
+
+void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch, Register result) {
+ Trunc_ul_s(fs, t8, scratch, result);
+ dmtc1(t8, fd);
+}
+
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
trunc_w_d(fd, fs);
@@ -1636,6 +1739,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
+ FPURegister scratch, Register result) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!AreAliased(rs, result, at));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0);
+ // If fd =< -1 or unordered, then the conversion fails.
+ BranchF(&fail, &fail, le, fd, scratch);
+ }
+
+ // Load 2^63 into scratch as its double representation.
+ li(at, 0x43e0000000000000);
+ dmtc1(at, scratch);
+
+ // Test if scratch > fd.
+ // If fd < 2^63 we can convert it normally.
+ BranchF(&simple_convert, nullptr, lt, fd, scratch);
+
+ // First we subtract 2^63 from fd, then trunc it to rs
+ // and add 2^63 to rs.
+ sub_d(scratch, fd, scratch);
+ trunc_l_d(scratch, scratch);
+ dmfc1(rs, scratch);
+ Or(rs, rs, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_l_d(scratch, fd);
+ dmfc1(rs, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+    // The conversion fails if the result is negative.
+    addiu(at, zero_reg, -1);
+    dsrl(at, at, 1);  // Load 2^63 - 1.
+ dmfc1(result, scratch);
+ xor_(result, result, at);
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+
+void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
+ FPURegister scratch, Register result) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!AreAliased(rs, result, at));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0f);
+    // If fd <= -1 or unordered, then the conversion fails.
+ BranchF32(&fail, &fail, le, fd, scratch);
+ }
+
+ // Load 2^63 into scratch as its float representation.
+ li(at, 0x5f000000);
+ mtc1(at, scratch);
+
+ // Test if scratch > fd.
+ // If fd < 2^63 we can convert it normally.
+ BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+
+ // First we subtract 2^63 from fd, then trunc it to rs
+ // and add 2^63 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_l_s(scratch, scratch);
+ dmfc1(rs, scratch);
+ Or(rs, rs, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_l_s(scratch, fd);
+ dmfc1(rs, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+    // The conversion fails if the result is negative or unordered.
+    addiu(at, zero_reg, -1);
+    dsrl(at, at, 1);  // Load 2^63 - 1.
+ dmfc1(result, scratch);
+ xor_(result, result, at);
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+
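
[Editor's note] trunc_l_d/trunc_l_s produce only signed 64-bit results, so inputs at or above 2^63 are biased down by 2^63 in the FPU before truncating, and bit 63 is ORed back into the integer afterwards. Scalar model of the large-value path (a sketch, not V8 code; 9223372036854775808.0 is 2^63):

    double d = /* input with d >= 2^63 */;
    int64_t t = static_cast<int64_t>(d - 9223372036854775808.0);  // now in range
    uint64_t u = static_cast<uint64_t>(t) | (1ULL << 63);         // restore bit 63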
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (0) { // TODO(plind): find reasonable arch-variant symbol names.
@@ -1669,13 +1868,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (kArchVariant != kMips64r6) {
if (long_branch) {
Label skip;
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1f(&skip);
nop();
- J(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
@@ -1688,13 +1887,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
- J(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
@@ -1710,7 +1909,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- J(target, bd);
+ BranchLong(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
@@ -2129,28 +2328,30 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+ DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchShort(L, bdslot);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- J(L, bdslot);
+ BranchLong(L, bdslot);
} else {
BranchShort(L, bdslot);
}
@@ -2162,17 +2363,15 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
}
} else {
@@ -2181,10 +2380,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
@@ -2203,7 +2402,10 @@ void MacroAssembler::Branch(Label* L,
}
-void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset);
// Emit a nop in the branch delay slot if required.
@@ -2212,549 +2414,544 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
}
-void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- DCHECK(!rs.is(zero_reg));
- Register r2 = no_reg;
- Register scratch = at;
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ bc(offset);
+}
- if (rt.is_reg()) {
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
- // rt.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- r2 = rt.rm_;
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison.
- case greater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- bne(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
- } else {
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- beq(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchShortHelperR6(offset, nullptr);
} else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- if (rt.imm64_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
- }
- break;
- case ne:
- if (rt.imm64_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
- }
- break;
- // Signed comparison.
- case greater:
- if (rt.imm64_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm64_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm64_ == 0) {
- bltz(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm64_ == 0) {
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm64_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm64_ == 0) {
- b(offset);
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm64_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm64_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(is_int16(offset));
+ BranchShortHelper(offset, nullptr, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BranchShortHelper(0, L, bdslot);
+ }
+}
- b(shifted_branch_offset(L, false));
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm().is(zero_reg);
+ } else {
+ return rt.immediate() == 0;
+ }
}
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+ if (L) {
+ offset = branch_offset_helper(L, bits) >> 2;
+ } else {
+ DCHECK(is_intn(offset, bits));
+ }
+ return offset;
+}
- int32_t offset = 0;
+
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+ Register scratch) {
Register r2 = no_reg;
- Register scratch = at;
if (rt.is_reg()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ return r2;
+}
+
+
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
+
+ // Be careful to always use shifted_branch_offset only just before the
+  // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
break;
case eq:
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre R6 beq is used here to make the code patchable. Otherwise bc
+        // should be used, which has no condition field and so is not patchable.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beq(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqc(rs, scratch, offset);
+ }
break;
case ne:
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre R6 bne is used here to make the code patchable. Otherwise we
+ // should not generate any instruction.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bne(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnec(rs, scratch, offset);
+ }
break;
+
// Signed comparison.
case greater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(scratch, rs, offset);
}
break;
case greater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(rs, scratch, offset);
}
break;
case less:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(rs, scratch, offset);
}
break;
case less_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(scratch, rs, offset);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(rs, scratch, offset);
}
break;
case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ break; // No code needs to be emitted.
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(scratch, rs, offset);
}
break;
default:
UNREACHABLE();
}
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ }
+ CheckTrampolinePoolQuick(1);
+ return true;
+}
+
+
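
[Editor's note] The R6 helper always tries the widest compact-branch encoding the selected instruction allows and returns false when the bound label is out of range, so the caller can fall back to a long branch. The encodings it picks between (offsets count instructions, hence the shift by 2 when expressed in bytes):

    bc / balc           26-bit offset   (+/- 2^27 bytes)
    beqzc / bnezc       21-bit offset   (+/- 2^22 bytes)
    beqc / bltc / ...   16-bit offset   (+/- 2^17 bytes)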
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+
+ Register scratch = at;
+ int32_t offset32;
+
+ // Be careful to always use shifted_branch_offset only just before the
+  // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
break;
case eq:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, scratch, offset32);
}
break;
case ne:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, scratch, offset32);
}
break;
+
// Signed comparison.
case greater:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgtz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case greater_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
case less:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case less_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ blez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Ugreater_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
- case Uless:
- if (rt.imm64_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ case Uless:
+ if (IsZero(rt)) {
+ return true; // No code needs to be emitted.
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Uless_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
default:
UNREACHABLE();
}
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ return BranchShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
+}
+
+
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(0, L, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
} else {
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
@@ -2766,13 +2963,11 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
}
} else {
@@ -2780,20 +2975,19 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
}
}
}
-// We need to use a bgezal or bltzal, but they can't be used directly with the
-// slt instructions. We could use sub or add instead but we would miss overflow
-// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLinkShort(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
// Emit a nop in the branch delay slot if required.
@@ -2802,230 +2996,306 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset,
}
-void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ balc(offset);
+}
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
+void MacroAssembler::BranchAndLinkShort(int32_t offset,
+ BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchAndLinkShortHelperR6(offset, nullptr);
+ } else {
+ DCHECK(is_int16(offset));
+ BranchAndLinkShortHelper(offset, nullptr, bdslot);
+ }
+}
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BranchAndLinkShortHelper(0, L, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
- bal(shifted_branch_offset(L, false));
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
+ switch (cond) {
+ case cc_always:
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ break;
+ case eq:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case ne:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case greater_equal:
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
+ case less:
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case less_equal:
+      // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
-void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset = 0;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Unsigned comparison.
+ case Ugreater:
+      // rs > rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Ugreater_equal:
+      // rs >= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case Uless:
+      // rs < rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Uless_equal:
+      // rs <= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ default:
+ UNREACHABLE();
}
+ return true;
+}
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
+// with the slt instructions. We could use sub or add instead but we would miss
+// overflow cases, so we keep slt and add an intermediate third instruction.
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+ Register scratch = t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ switch (cond) {
+ case cc_always:
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
- default:
- UNREACHABLE();
- }
+ // Signed comparison.
+ case greater:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
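
[Editor's note] The pre-r6 conditional-call sequences rest on a small sign trick: Slt/Sltu leave 0 or 1 in scratch, addiu scratch, scratch, -1 maps that to -1 (condition false) or 0 (condition true), and bgezal links and branches exactly when its operand is non-negative (bltzal covers the negated comparisons). Annotated sketch of the greater case from the code above:

    Slt(scratch, rt, rs);         // scratch = (rt < rs) ? 1 : 0
    addiu(scratch, scratch, -1);  // scratch = taken ? 0 : -1
    bgezal(scratch, offset);      // link + branch iff scratch >= 0, i.e. rs > rt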
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
}
@@ -3115,6 +3385,10 @@ void MacroAssembler::Call(Register target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+#ifdef DEBUG
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -3129,8 +3403,10 @@ void MacroAssembler::Call(Register target,
if (bd == PROTECT)
nop();
- DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
}
@@ -3208,31 +3484,43 @@ void MacroAssembler::Ret(Condition cond,
}
-void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(L);
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchShortHelperR6(0, L);
+ } else {
+ EmitForbiddenSlotInstruction();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ j(L);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
-void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- jal(L);
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ EmitForbiddenSlotInstruction();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ jal(L);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
@@ -3426,12 +3714,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(t9));
- DCHECK(!scratch2.is(t9));
- DCHECK(!result.is(t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3447,34 +3730,35 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- ld(result, MemOperand(topaddr));
- ld(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ld(result, MemOperand(top_address));
+ ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- ld(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ ld(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
+ // Load allocation limit. Result already contains allocation top.
+ ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
- DCHECK(kPointerSize == kDoubleSize);
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
@@ -3482,9 +3766,9 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Daddu(scratch2, result, Operand(object_size));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sd(scratch2, MemOperand(topaddr));
+ Daddu(result_end, result, Operand(object_size));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
+ sd(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3493,28 +3777,23 @@ void MacroAssembler::Allocate(int object_size,
}
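
Both Allocate overloads share the same linear-allocation fast path: load the current top, compute result_end, bail to gc_required on an unsigned compare against the limit, and publish the new top. A standalone bump-pointer sketch of that scheme, using plain C++ types rather than V8's heap machinery:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct AllocSpace {
      uintptr_t top;    // What ld(result, MemOperand(top_address)) reads.
      uintptr_t limit;  // The adjacent word, kPointerSize further on.
    };

    // Returns 0 when the space is exhausted (the gc_required branch).
    uintptr_t BumpAllocate(AllocSpace* space, size_t size_in_bytes) {
      uintptr_t result = space->top;
      uintptr_t result_end = result + size_in_bytes;  // Daddu(result_end, ...).
      if (result_end > space->limit) return 0;        // Branch(..., Ugreater, ...).
      space->top = result_end;                        // sd(result_end, ...).
      return result;
    }

    int main() {
      unsigned char arena[256];
      AllocSpace space{reinterpret_cast<uintptr_t>(arena),
                       reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
      printf("%d\n", BumpAllocate(&space, 64) != 0);   // 1: fits.
      printf("%d\n", BumpAllocate(&space, 512) != 0);  // 0: would overflow.
    }
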
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
+ li(scratch, 0x7191);
+ li(result_end, 0x7291);
}
jmp(gc_required);
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(t9));
- DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+  // |object_size| and |result_end| may overlap; the other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, t9));
+ DCHECK(!AreAliased(result_end, result, scratch, t9));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3523,34 +3802,34 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- ld(result, MemOperand(topaddr));
- ld(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ld(result, MemOperand(top_address));
+ ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- ld(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ ld(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
+ // Load allocation limit. Result already contains allocation top.
+ ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
- DCHECK(kPointerSize == kDoubleSize);
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
@@ -3560,19 +3839,19 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- dsll(scratch2, object_size, kPointerSizeLog2);
- Daddu(scratch2, result, scratch2);
+ dsll(result_end, object_size, kPointerSizeLog2);
+ Daddu(result_end, result, result_end);
} else {
- Daddu(scratch2, result, Operand(object_size));
+ Daddu(result_end, result, Operand(object_size));
}
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ And(at, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
- sd(scratch2, MemOperand(topaddr));
+ sd(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3749,29 +4028,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.reg_code = i;
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- ld(tmp, FieldMemOperand(src, i * kPointerSize));
- sd(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ sd(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
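
The four sd stores above, plus the trailing STATIC_ASSERT, fully determine the wrapper object's shape. A hypothetical C++ mirror of that layout, assuming the field order implied by the offsets used (these are not V8's real class definitions):

    #include <cstdint>

    struct JSValueLayout {
      void* map;         // HeapObject::kMapOffset:       initial map.
      void* properties;  // JSObject::kPropertiesOffset:  empty fixed array.
      void* elements;    // JSObject::kElementsOffset:    empty fixed array.
      void* value;       // JSValue::kValueOffset:        wrapped primitive.
    };

    // Mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize).
    static_assert(sizeof(JSValueLayout) == 4 * sizeof(void*),
                  "JSValue is exactly four pointer-sized fields");

    int main() { return 0; }
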
@@ -3858,16 +4133,16 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
Branch(&entry);
bind(&loop);
- sd(filler, MemOperand(start_offset));
- Daddu(start_offset, start_offset, kPointerSize);
+ sd(filler, MemOperand(current_address));
+ Daddu(current_address, current_address, kPointerSize);
bind(&entry);
- Branch(&loop, ult, start_offset, Operand(end_offset));
+ Branch(&loop, ult, current_address, Operand(end_address));
}
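
The renamed parameters make the contract clearer: this is a bottom-tested store loop entered through an unconditional branch, so an empty range writes nothing. An equivalent portable sketch:

    #include <cstdint>
    #include <cstdio>

    // Store `filler` into every pointer-sized slot in [current, end).
    void InitializeFieldsWithFiller(uintptr_t* current, uintptr_t* end,
                                    uintptr_t filler) {
      while (current < end) {  // Branch(&loop, ult, current_address, ...).
        *current++ = filler;   // sd + Daddu(current_address, ..., kPointerSize).
      }
    }

    int main() {
      uintptr_t slots[4] = {0, 0, 0, 0};
      InitializeFieldsWithFiller(slots, slots + 4, 0x2a);
      printf("%d\n", (int)slots[3]);  // 42
    }
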
@@ -3917,6 +4192,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch2,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
Label smi_value, done;
// Handle smi values specially.
@@ -3938,10 +4214,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
FPUCanonicalizeNaN(double_result, double_result);
bind(&smi_value);
- // scratch1 is now effective address of the double element.
// Untag and transfer.
- dsrl32(at, value_reg, 0);
- mtc1(at, double_scratch);
+ dsrl32(scratch1, value_reg, 0);
+ mtc1(scratch1, double_scratch);
cvt_d_w(double_result, double_scratch);
bind(&done);
@@ -3950,6 +4225,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
elements_offset));
dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
Daddu(scratch1, scratch1, scratch2);
+ // scratch1 is now effective address of the double element.
sdc1(double_result, MemOperand(scratch1, 0));
}
@@ -4115,8 +4391,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4136,7 +4410,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(a0));
DCHECK(expected.is_immediate() || expected.reg().is(a2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4164,11 +4437,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -4186,21 +4454,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ li(t0, Operand(step_in_enabled));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(a1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- Label done;
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ }
+
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = t0;
+ ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4217,6 +4542,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -4226,17 +4552,16 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function.is(a1));
Register expected_reg = a2;
- Register code_reg = a3;
- ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Register temp_reg = t0;
+ ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
}
@@ -4254,11 +4579,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4563,6 +4884,89 @@ void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
}
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DaddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (right.is_reg()) {
+ DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ li(overflow_dst, right); // Load right.
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Daddu(dst, left, overflow_dst); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Daddu(dst, left, overflow_dst);
+ xor_(scratch, dst, overflow_dst);
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ daddu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ daddu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ daddu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
+
+
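
The xor/and sequence is the classic branch-free signed-overflow predicate: for dst = left + right, overflow happened iff dst's sign differs from the signs of both operands, and BranchOvfHelper then branches on the sign of that combined value. A runnable sketch of the same predicate; the wrapping add is done in unsigned arithmetic so the C++ stays well defined:

    #include <cstdint>
    #include <cstdio>

    // Overflow iff (dst ^ left) & (dst ^ right) has its sign bit set,
    // i.e. the sum's sign disagrees with both inputs' signs.
    bool AddOverflows(int64_t left, int64_t right, int64_t* dst) {
      *dst = (int64_t)((uint64_t)left + (uint64_t)right);  // daddu: wraps.
      return ((*dst ^ left) & (*dst ^ right)) < 0;         // Sign-bit test.
    }

    int main() {
      int64_t d;
      printf("%d\n", AddOverflows(INT64_MAX, 1, &d));  // 1: overflows.
      printf("%d\n", AddOverflows(1, 2, &d));          // 0: fine.
    }
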
void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
const Operand& right,
Register overflow_dst,
@@ -4694,6 +5098,83 @@ void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
}
}
+
+void MacroAssembler::DsubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ li(overflow_dst, right); // Load right.
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Dsubu(dst, left, overflow_dst); // Left is overwritten.
+      xor_(overflow_dst, scratch, overflow_dst);  // overflow_dst = left ^ right.
+      xor_(scratch, dst, scratch);                // scratch = dst ^ left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ Dsubu(dst, left, overflow_dst);
+ xor_(scratch, left, overflow_dst);
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+  // This happens with some Crankshaft code. Since Dsubu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ dsubu(dst, left, right); // Left is overwritten.
+    xor_(overflow_dst, dst, scratch);  // overflow_dst = dst ^ left.
+    xor_(scratch, scratch, right);     // scratch = left ^ right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ dsubu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ dsubu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
+
+
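
Subtraction uses the matching predicate: for dst = left - right, overflow happened iff the operands' signs differ and dst's sign differs from left's. The same test as a runnable sketch:

    #include <cstdint>
    #include <cstdio>

    // Overflow iff (left ^ right) & (dst ^ left) has its sign bit set.
    bool SubOverflows(int64_t left, int64_t right, int64_t* dst) {
      *dst = (int64_t)((uint64_t)left - (uint64_t)right);  // dsubu: wraps.
      return ((left ^ right) & (*dst ^ left)) < 0;
    }

    int main() {
      int64_t d;
      printf("%d\n", SubOverflows(INT64_MIN, 1, &d));  // 1: overflows.
      printf("%d\n", SubOverflows(5, 3, &d));          // 0: fine.
    }
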
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
@@ -4726,24 +5207,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
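
The simplified TailCallRuntime reads the arity from the runtime function's descriptor instead of taking it as a parameter; a sketch of that contract, assuming (as in V8 of this era) that a negative nargs marks a variable-arity function whose caller must set the argument-count register itself. The descriptor type here is illustrative, not V8's actual tables:

    #include <cstdio>

    struct RuntimeFunction { const char* name; int nargs; int result_size; };

    // a0 stands in for the CEntry argument-count register.
    void TailCallRuntime(const RuntimeFunction& f, int* a0) {
      if (f.nargs >= 0) *a0 = f.nargs;  // PrepareCEntryArgs(function->nargs).
      // ...then tail-jump to the CEntry stub (JumpToExternalReference).
    }

    int main() {
      RuntimeFunction fixed_arity{"Example", 2, 1};
      int a0 = -1;
      TailCallRuntime(fixed_arity, &a0);
      printf("%d\n", a0);  // 2
    }
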
@@ -4765,34 +5235,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- ld(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, native_context_index);
- // Load the code entry point from the builtins object.
- ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, a1);
+ InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -4929,46 +5375,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- ld(dst, GlobalObjectOperand());
- ld(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ld(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- ld(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- ld(at, FieldMemOperand(scratch, offset));
+ ld(scratch, NativeContextMemOperand());
+ ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ld(map_in_out, FieldMemOperand(scratch, offset));
+ ld(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ld(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ld(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ld(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ ld(dst, NativeContextMemOperand());
+ ld(dst, ContextMemOperand(dst, index));
}
@@ -5440,6 +5869,17 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5703,8 +6143,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -5733,28 +6173,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- DCHECK(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5774,23 +6192,18 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
// Note that we are using a 4-byte aligned 8-byte load.
@@ -5801,93 +6214,7 @@ void MacroAssembler::EnsureNotWhite(
lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
}
And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- dsll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(zero_reg));
- // Adjust length for UC16.
- dsll(t9, t9, 1);
- bind(&skip);
- }
- Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- DCHECK(!length.is(t8));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ Branch(value_is_white, eq, t8, Operand(zero_reg));
}
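
With the updated mark patterns (white 00, grey 10, black 11, impossible 01, reading the mask bit first), black and grey both carry a 1 in the first bit of the two-bit cell, so whiteness collapses to a single AND against the mark-bit mask. A sketch of that one-bit test:

    #include <cstdint>
    #include <cstdio>

    // White iff the first mark bit of the object's two-bit cell is clear;
    // this is exactly And(t8, mask_scratch, load_scratch) plus the eq branch.
    bool IsWhite(uint32_t bitmap_word, uint32_t mask) {
      return (bitmap_word & mask) == 0;
    }

    int main() {
      uint32_t mask = 1u << 4;              // First bit of some mark cell.
      printf("%d\n", IsWhite(0u, mask));    // 1: white (00).
      printf("%d\n", IsWhite(mask, mask));  // 0: grey (10) or black (11).
    }
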
@@ -6092,17 +6419,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -6113,18 +6436,19 @@ bool AreAliased(Register reg1,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
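
The widened AreAliased keeps the same duplicate-detection trick: OR each valid register's bit into a RegList and compare the population count with the number of registers supplied; any alias collapses two bits into one. A sketch using a compiler builtin (__builtin_popcount, GCC/Clang) in place of NumRegs:

    #include <cstdint>
    #include <cstdio>

    // True iff any two of the given register codes are equal.
    bool AreAliased(const int* reg_codes, int n) {
      uint32_t regs = 0;
      for (int i = 0; i < n; i++) regs |= 1u << reg_codes[i];
      return __builtin_popcount(regs) != n;  // NumRegs(regs) vs. valid count.
    }

    int main() {
      int distinct[] = {2, 3, 4};
      int aliased[] = {2, 3, 2};
      printf("%d %d\n", AreAliased(distinct, 3), AreAliased(aliased, 3));  // 0 1
    }
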
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
@@ -6136,7 +6460,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -6154,25 +6478,10 @@ void CodePatcher::Emit(Address addr) {
}
-void CodePatcher::ChangeBranchCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- DCHECK(Assembler::IsBranch(instr));
- uint32_t opcode = Assembler::GetOpcodeField(instr);
- // Currently only the 'eq' and 'ne' cond values are supported and the simple
- // branch instructions (with opcode being the branch type).
- // There are some special cases (see Assembler::IsBranch()) so extending this
- // would be tricky.
- DCHECK(opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL);
- opcode = (cond == eq) ? BEQ : BNE;
- instr = (instr & ~kOpcodeMask) | opcode;
- masm_.emit(instr);
+void CodePatcher::ChangeBranchCondition(Instr current_instr,
+ uint32_t new_opcode) {
+ current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
+ masm_.emit(current_instr);
}