path: root/deps/v8/src/mips64/macro-assembler-mips64.cc
Diffstat (limited to 'deps/v8/src/mips64/macro-assembler-mips64.cc')
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc | 792
1 file changed, 664 insertions, 128 deletions
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index fb83fe9b76..aa0de26b88 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -17,6 +17,18 @@
namespace v8 {
namespace internal {
+// Floating point constants.
+const uint64_t kDoubleSignMask = Double::kSignMask;
+const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
+const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
+const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
+
+const uint32_t kSingleSignMask = kBinary32SignMask;
+const uint32_t kSingleExponentMask = kBinary32ExponentMask;
+const uint32_t kSingleExponentShift = kBinary32ExponentShift;
+const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
+const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
+
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
@@ -29,7 +41,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
}
}
-
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@@ -1321,37 +1332,245 @@ void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
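+// Branch on (no) overflow using the compact bovc/bnvc instructions. Once the
+// trampoline pool has been emitted, the target may be out of compact-branch
+// range, so the condition is inverted and a long branch is used instead.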
+void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ bnvc(rs, rt, &skip);
+ BranchLong(L, PROTECT);
+ bind(&skip);
+ } else {
+ bovc(rs, rt, L);
+ }
+}
+
+void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ bovc(rs, rt, &skip);
+ BranchLong(L, PROTECT);
+ bind(&skip);
+ } else {
+ bnvc(rs, rt, L);
+ }
+}
// ------------Pseudo-instructions-------------
+// Change endianness
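+// The dsbh/dshd pair reverses all eight bytes of a doubleword; sub-doubleword
+// operands are first sign-extended (signed) or masked (unsigned).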
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
+ operand_size == 8);
+ DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
+ if (operand_size == 1) {
+ seb(src, src);
+ sll(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else if (operand_size == 2) {
+ seh(src, src);
+ sll(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else if (operand_size == 4) {
+ sll(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else {
+ dsbh(dest, src);
+ dshd(dest, dest);
+ }
+}
+
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+ if (operand_size == 1) {
+ andi(src, src, 0xFF);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else if (operand_size == 2) {
+ andi(src, src, 0xFFFF);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else {
+ dsll32(src, src, 0);
+ dsrl32(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ }
+}
+
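+// Load a word from a potentially unaligned address. MIPS64r6 handles
+// unaligned accesses in hardware; on r2 the word is assembled with an
+// lwr/lwl pair, going through 'at' when rd aliases the base register.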
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
- lwr(rd, rs);
- lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ lw(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsLwrOffset) &&
+ is_int16(rs.offset() + kMipsLwlOffset)) {
+ if (!rd.is(rs.rm())) {
+ lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ } else {
+ lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ mov(rd, at);
+ }
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ lwr(rd, MemOperand(at, kMipsLwrOffset));
+ lwl(rd, MemOperand(at, kMipsLwlOffset));
+ }
+ }
+}
+
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
+ if (kArchVariant == kMips64r6) {
+ lwu(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ Ulw(rd, rs);
+ Dext(rd, rd, 0, 32);
+ }
}
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
- swr(rd, rs);
- swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ sw(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsSwrOffset) &&
+ is_int16(rs.offset() + kMipsSwlOffset)) {
+ swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+ swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+ } else {
+ LoadRegPlusOffsetToAt(rs);
+ swr(rd, MemOperand(at, kMipsSwrOffset));
+ swl(rd, MemOperand(at, kMipsSwlOffset));
+ }
+ }
}
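+// Load a signed halfword from a potentially unaligned address. On r2 the
+// halfword is assembled from two byte loads in endian order.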
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ lh(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(at, rs);
+ lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ lb(rd, rs);
+#endif
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lb(rd, MemOperand(at, 1));
+ lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lb(rd, MemOperand(at, 0));
+ lbu(at, MemOperand(at, 1));
+#endif
+ }
+ dsll(rd, rd, 8);
+ or_(rd, rd, at);
+ }
+}
-// Do 64-bit load from unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
- // Assert fail if the offset from start of object IS actually aligned.
- // ONLY use with known misalignment, since there is performance cost.
- DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- if (kArchEndian == kLittle) {
- lwu(rd, rs);
- lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
- dsll32(scratch, scratch, 0);
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ lhu(rd, rs);
} else {
- lw(rd, rs);
- lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
- dsll32(rd, rd, 0);
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(at, rs);
+ lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ lbu(rd, rs);
+#endif
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(rd, MemOperand(at, 1));
+ lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(rd, MemOperand(at, 0));
+ lbu(at, MemOperand(at, 1));
+#endif
+ }
+ dsll(rd, rd, 8);
+ or_(rd, rd, at);
+ }
+}
+
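+// Store a halfword to a potentially unaligned address. On r2 the value is
+// written as two separate byte stores.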
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ DCHECK(!rs.rm().is(scratch));
+ DCHECK(!scratch.is(at));
+ if (kArchVariant == kMips64r6) {
+ sh(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ MemOperand source = rs;
+ // If offset > 16 bits, load address to at with offset 0.
+ if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+ LoadRegPlusOffsetToAt(rs);
+ source = MemOperand(at, 0);
+ }
+
+ if (!scratch.is(rd)) {
+ mov(scratch, rd);
+ }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ sb(scratch, source);
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, source);
+#endif
+ }
+}
+
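+// Load a doubleword from a potentially unaligned address. MIPS64r6 handles
+// this in hardware; on r2 the ldr/ldl pair is used, going through 'at' when
+// rd aliases the base register.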
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ ld(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsLdrOffset) &&
+ is_int16(rs.offset() + kMipsLdlOffset)) {
+ if (!rd.is(rs.rm())) {
+ ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+ ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+ } else {
+ ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+ ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+ mov(rd, at);
+ }
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ ldr(rd, MemOperand(at, kMipsLdrOffset));
+ ldl(rd, MemOperand(at, kMipsLdlOffset));
+ }
}
- Daddu(rd, rd, scratch);
}
@@ -1366,21 +1585,22 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Daddu(rd, rd, scratch);
}
-
-// Do 64-bit store to unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
- // Assert fail if the offset from start of object IS actually aligned.
- // ONLY use with known misalignment, since there is performance cost.
- DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- if (kArchEndian == kLittle) {
- sw(rd, rs);
- dsrl32(scratch, rd, 0);
- sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
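+// Store a doubleword to a potentially unaligned address; on r2 this uses the
+// sdr/sdl pair.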
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ sd(rd, rs);
} else {
- sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
- dsrl32(scratch, rd, 0);
- sw(scratch, rs);
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsSdrOffset) &&
+ is_int16(rs.offset() + kMipsSdlOffset)) {
+ sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
+ sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
+ } else {
+ LoadRegPlusOffsetToAt(rs);
+ sdr(rd, MemOperand(at, kMipsSdrOffset));
+ sdl(rd, MemOperand(at, kMipsSdlOffset));
+ }
}
}
@@ -1393,23 +1613,56 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
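+// Unaligned FPU loads and stores: on r2 the value is moved through a
+// general-purpose scratch register and the unaligned integer helpers above.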
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ if (kArchVariant == kMips64r6) {
+ lwc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ Ulw(scratch, rs);
+ mtc1(scratch, fd);
+ }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ if (kArchVariant == kMips64r6) {
+ swc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ mfc1(scratch, fd);
+ Usw(scratch, rs);
+ }
+}
-void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- li(dst, Operand(value), mode);
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- li(dst, Operand(cell));
- ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- li(dst, Operand(value));
- }
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(!scratch.is(at));
+ if (kArchVariant == kMips64r6) {
+ ldc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ Uld(scratch, rs);
+ dmtc1(scratch, fd);
}
}
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(!scratch.is(at));
+ if (kArchVariant == kMips64r6) {
+ sdc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ dmfc1(scratch, fd);
+ Usd(scratch, rs);
+ }
+}
+
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ li(dst, Operand(value), mode);
+}
+
static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
if ((imm >> (bitnum - 1)) & 0x1) {
imm = (imm >> bitnum) + 1;
@@ -1706,6 +1959,61 @@ void MacroAssembler::Ins(Register rt,
ins_(rt, rs, pos, size);
}
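+// Negate a single-precision value. Unlike a bare neg_s, this also flips the
+// sign bit of NaN inputs by manipulating the bit pattern directly.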
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+ if (kArchVariant == kMips64r2) {
+ Label is_nan, done;
+ BranchF32(nullptr, &is_nan, eq, fs, fs);
+ Branch(USE_DELAY_SLOT, &done);
+ // For NaN input, neg_s will return the same NaN value,
+ // while the sign has to be changed separately.
+ neg_s(fd, fs); // In delay slot.
+ bind(&is_nan);
+ mfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~kBinary32SignMask));
+ And(scratch1, scratch1, Operand(kBinary32SignMask));
+ Xor(scratch1, scratch1, Operand(kBinary32SignMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+ bind(&done);
+ } else {
+ mfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~kBinary32SignMask));
+ And(scratch1, scratch1, Operand(kBinary32SignMask));
+ Xor(scratch1, scratch1, Operand(kBinary32SignMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+ }
+}
+
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+ if (kArchVariant == kMips64r2) {
+ Label is_nan, done;
+ BranchF64(nullptr, &is_nan, eq, fs, fs);
+ Branch(USE_DELAY_SLOT, &done);
+ // For NaN input, neg_d will return the same NaN value,
+ // while the sign has to be changed separately.
+ neg_d(fd, fs); // In delay slot.
+ bind(&is_nan);
+ dmfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~Double::kSignMask));
+ And(scratch1, scratch1, Operand(Double::kSignMask));
+ Xor(scratch1, scratch1, Operand(Double::kSignMask));
+ Or(scratch2, scratch2, scratch1);
+ dmtc1(scratch2, fd);
+ bind(&done);
+ } else {
+ dmfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~Double::kSignMask));
+ And(scratch1, scratch1, Operand(Double::kSignMask));
+ Xor(scratch1, scratch1, Operand(Double::kSignMask));
+ Or(scratch2, scratch2, scratch1);
+ dmtc1(scratch2, fd);
+ }
+}
void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
@@ -2279,14 +2587,12 @@ void MacroAssembler::Move(FPURegister dst, float imm) {
void MacroAssembler::Move(FPURegister dst, double imm) {
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value_rep(imm);
+ int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
- if (value_rep == zero && has_double_zero_reg_set_) {
+ if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
- neg_d(dst, kDoubleRegZero);
+ } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
@@ -3843,9 +4149,6 @@ void MacroAssembler::Call(Address target,
Label start;
bind(&start);
int64_t target_int = reinterpret_cast<int64_t>(target);
- // Must record previous source positions before the
- // li() generates a new code target.
- positions_recorder()->WriteRecordedPositions();
li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
Call(t9, cond, rs, rt, bd);
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
@@ -4142,12 +4445,14 @@ void MacroAssembler::Allocate(int object_size,
// to calculate the new top.
Daddu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
- sd(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Daddu(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ sd(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ Daddu(result, result, Operand(kHeapObjectTag));
}
@@ -4217,6 +4522,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
Daddu(result_end, result, Operand(object_size));
}
+
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
@@ -4224,14 +4530,91 @@ void MacroAssembler::Allocate(Register object_size, Register result,
And(at, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
- sd(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Daddu(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ sd(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ Daddu(result, result, Operand(kHeapObjectTag));
+}
+
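+// Like Allocate, but without an allocation-limit check: the caller must
+// already have ensured that enough new space is available.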
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, at));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK(0 == (object_size & kObjectAlignmentMask));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+ ld(result, MemOperand(top_address));
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ if (emit_debug_code()) {
+ And(at, result, Operand(kDoubleAlignmentMask));
+ Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
}
+
+ // Calculate new top and write it back.
+ Daddu(result_end, result, Operand(object_size));
+ sd(result_end, MemOperand(top_address));
+
+ Daddu(result, result, Operand(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap; the other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, at));
+ DCHECK(!AreAliased(result_end, result, scratch, at));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch;
+ li(top_address, Operand(allocation_top));
+ ld(result, MemOperand(top_address));
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ if (emit_debug_code()) {
+ And(at, result, Operand(kDoubleAlignmentMask));
+ Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+ }
+
+ // Calculate new top and write it back.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Dlsa(result_end, result, object_size, kPointerSizeLog2);
+ } else {
+ Daddu(result_end, result, Operand(object_size));
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ And(at, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
+ }
+
+ Daddu(result, result, Operand(kHeapObjectTag));
+}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -4248,12 +4631,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -4277,12 +4656,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -4296,7 +4671,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
@@ -4309,12 +4684,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -4327,7 +4698,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -4343,7 +4714,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -4369,12 +4740,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Register heap_number_map,
Label* need_gc,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -4382,11 +4752,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -4410,7 +4776,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -4602,6 +4969,72 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
sdc1(double_result, MemOperand(scratch1, 0));
}
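+// Compute fs - ft. If either operand is a NaN, the sign and payload bits of
+// that NaN are copied into the result instead of the default NaN produced by
+// the FPU subtraction.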
+void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
+ FPURegister fs,
+ FPURegister ft) {
+ FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+ Label check_nan, save_payload, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+
+ sub_s(dest, fs, ft);
+ // Check if the result of subtraction is NaN.
+ BranchF32(nullptr, &check_nan, eq, fs, ft);
+ Branch(USE_DELAY_SLOT, &done);
+ dest.is(fd) ? nop() : mov_s(fd, dest);
+
+ bind(&check_nan);
+ // Check if first operand is a NaN.
+ mfc1(scratch1, fs);
+ BranchF32(nullptr, &save_payload, eq, fs, fs);
+ // Second operand must be a NaN.
+ mfc1(scratch1, ft);
+
+ bind(&save_payload);
+ // Preserve the payload.
+ And(scratch1, scratch1,
+ Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
+ mfc1(scratch2, dest);
+ And(scratch2, scratch2, Operand(kSingleNaNMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+
+ bind(&done);
+}
+
+void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
+ FPURegister fs,
+ FPURegister ft) {
+ FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+ Label check_nan, save_payload, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+
+ sub_d(dest, fs, ft);
+ // Check if the result of subtraction is NaN.
+ BranchF64(nullptr, &check_nan, eq, fs, ft);
+ Branch(USE_DELAY_SLOT, &done);
+ dest.is(fd) ? nop() : mov_d(fd, dest);
+
+ bind(&check_nan);
+ // Check if first operand is a NaN.
+ dmfc1(scratch1, fs);
+ BranchF64(nullptr, &save_payload, eq, fs, fs);
+ // Second operand must be a NaN.
+ dmfc1(scratch1, ft);
+
+ bind(&save_payload);
+ // Preserve the payload.
+ li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
+ Dsubu(at, at, Operand(1));
+ And(scratch1, scratch1, at);
+ dmfc1(scratch2, dest);
+ And(scratch2, scratch2, Operand(kDoubleNaNMask));
+ Or(scratch2, scratch2, scratch1);
+ dmtc1(scratch2, fd);
+
+ bind(&done);
+}
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
@@ -4890,11 +5323,12 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- li(t0, Operand(step_in_enabled));
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ li(t0, Operand(last_step_action));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ Branch(&skip_flooding, lt, t0, Operand(StepIn));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -5250,9 +5684,9 @@ void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
Move(left_reg, left);
Move(right_reg, right);
addu(dst, left, right);
- bnvc(left_reg, right_reg, no_overflow_label);
+ Bnvc(left_reg, right_reg, no_overflow_label);
} else {
- bovc(left, right, overflow_label);
+ Bovc(left, right, overflow_label);
addu(dst, left, right);
if (no_overflow_label) bc(no_overflow_label);
}
@@ -5515,6 +5949,78 @@ void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
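+// Overflow check for 32-bit signed multiplication: the product overflows iff
+// the high 32 bits (Mulh) differ from the sign-extension of the low 32-bit
+// result.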
+static inline void BranchOvfHelperMult(MacroAssembler* masm,
+ Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+void MacroAssembler::MulBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+
+ if (dst.is(left)) {
+ Mul(scratch, left, static_cast<int32_t>(right.immediate()));
+ Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
+ mov(dst, scratch);
+ } else {
+ Mul(dst, left, static_cast<int32_t>(right.immediate()));
+ Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
+ }
+
+ dsra32(scratch, dst, 0);
+ xor_(overflow_dst, overflow_dst, scratch);
+
+ BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+ if (dst.is(left) || dst.is(right)) {
+ Mul(scratch, left, right);
+ Mulh(overflow_dst, left, right);
+ mov(dst, scratch);
+ } else {
+ Mul(dst, left, right);
+ Mulh(overflow_dst, left, right);
+ }
+
+ dsra32(scratch, dst, 0);
+ xor_(overflow_dst, overflow_dst, scratch);
+
+ BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
@@ -5557,11 +6063,12 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd) {
+ BranchDelaySlot bd,
+ bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
@@ -5570,13 +6077,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch1, Operand(value));
li(scratch2, Operand(ExternalReference(counter)));
- sd(scratch1, MemOperand(scratch2));
+ sw(scratch1, MemOperand(scratch2));
}
}
@@ -5586,9 +6092,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- ld(scratch1, MemOperand(scratch2));
- Daddu(scratch1, scratch1, Operand(value));
- sd(scratch1, MemOperand(scratch2));
+ lw(scratch1, MemOperand(scratch2));
+ Addu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
}
}
@@ -5598,9 +6104,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- ld(scratch1, MemOperand(scratch2));
- Dsubu(scratch1, scratch1, Operand(value));
- sd(scratch1, MemOperand(scratch2));
+ lw(scratch1, MemOperand(scratch2));
+ Subu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
}
}
@@ -5660,16 +6166,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(Smi::FromInt(reason)));
- push(a0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -5779,9 +6288,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- ld(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -5827,7 +6335,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ld(fp, MemOperand(fp, 0 * kPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
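+// A builtin frame holds the caller's ra and fp followed by the context, the
+// target function and the argument count.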
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(ra, fp);
+ Move(fp, sp);
+ Push(context, target, argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(context, target, argc);
+ Pop(ra, fp);
+}
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
// Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -5847,7 +6372,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
sd(ra, MemOperand(sp, 4 * kPointerSize));
sd(fp, MemOperand(sp, 3 * kPointerSize));
- li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+ li(at, Operand(Smi::FromInt(frame_type)));
sd(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -6225,6 +6750,16 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAGeneratorObject, t8,
+ Operand(JS_GENERATOR_OBJECT_TYPE));
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -6696,7 +7231,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -6705,14 +7240,16 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ li(at, Operand(new_space_allocation_top_adr));
+ ld(at, MemOperand(at));
+ Xor(scratch_reg, scratch_reg, Operand(at));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -6721,13 +7258,13 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- li(at, Operand(new_space_allocation_top));
- lw(at, MemOperand(at));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ li(at, Operand(new_space_allocation_top_adr));
+ ld(at, MemOperand(at));
Branch(no_memento_found, gt, scratch_reg, Operand(at));
// Memento map check.
bind(&map_check);
- lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
Branch(no_memento_found, ne, scratch_reg,
Operand(isolate()->factory()->allocation_memento_map()));
}
@@ -6747,8 +7284,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);