Diffstat (limited to 'deps/v8/src/mips64/macro-assembler-mips64.cc')
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc  394
1 file changed, 200 insertions(+), 194 deletions(-)
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 83ceae69d1..889d09f27e 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -9,7 +9,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -21,14 +20,15 @@
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -40,17 +40,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
- has_double_zero_reg_set_(false) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
return rt.rm() == zero_reg;
@@ -138,14 +127,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
}
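Note: `RootRegisterOffset(index)` replaces the open-coded `index << kPointerSizeLog2`. A minimal host-side sketch of the intended equivalence, assuming the helper simply scales the root-list index by the pointer size (the constants and signature below are stand-ins, not V8's declarations):

    #include <cassert>
    #include <cstdint>

    // Stand-ins for V8's 64-bit constants.
    constexpr int kPointerSize = 8;
    constexpr int kPointerSizeLog2 = 3;

    // Assumed shape of the helper: root-list index -> byte offset from s6.
    constexpr int32_t RootRegisterOffset(int index) {
      return index * kPointerSize;
    }

    int main() {
      for (int index = 0; index < 64; ++index) {
        assert(RootRegisterOffset(index) == (index << kPointerSizeLog2));
      }
      return 0;
    }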
@@ -223,6 +212,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Daddu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
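Note: most hunks in this change insert `BlockTrampolinePoolScope`. The MIPS assembler may spontaneously emit a trampoline pool between any two instructions, which would split sequences that rely on fixed relative layout (branch plus delay slot, `li` plus dependent use); the scope defers pool emission until it is safe again. A toy RAII sketch of the mechanism (illustrative only, not V8's implementation):

    #include <cassert>

    // Toy assembler: pool emission is allowed only at depth zero.
    class Assembler {
     public:
      void StartBlockTrampolinePool() { ++block_depth_; }
      void EndBlockTrampolinePool() { --block_depth_; }
      bool CanEmitPool() const { return block_depth_ == 0; }

     private:
      int block_depth_ = 0;
    };

    class BlockTrampolinePoolScope {
     public:
      explicit BlockTrampolinePoolScope(Assembler* assm) : assm_(assm) {
        assm_->StartBlockTrampolinePool();
      }
      ~BlockTrampolinePoolScope() { assm_->EndBlockTrampolinePool(); }

     private:
      Assembler* assm_;
    };

    int main() {
      Assembler assm;
      {
        BlockTrampolinePoolScope scope(&assm);
        assert(!assm.CanEmitPool());  // emission deferred inside the scope
      }
      assert(assm.CanEmitPool());  // allowed again once the scope closes
      return 0;
    }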
@@ -949,6 +939,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
@@ -972,6 +963,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
@@ -987,6 +979,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, scratch, rs);
@@ -1001,6 +994,7 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, scratch, rs);
@@ -1025,6 +1019,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, scratch, rs);
@@ -1038,6 +1033,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, scratch, rs);
@@ -1128,23 +1124,14 @@ void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
// Change endianness
void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
- DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
- operand_size == 8);
+ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
- if (operand_size == 1) {
- seb(src, src);
- sll(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
- } else if (operand_size == 2) {
- seh(src, src);
- sll(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
+ if (operand_size == 2) {
+ wsbh(dest, src);
+ seh(dest, dest);
} else if (operand_size == 4) {
- sll(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
} else {
dsbh(dest, src);
dshd(dest, dest);
@@ -1153,20 +1140,14 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src,
void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
- DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
- if (operand_size == 1) {
- andi(src, src, 0xFF);
- dsbh(dest, src);
- dshd(dest, dest);
- } else if (operand_size == 2) {
- andi(src, src, 0xFFFF);
- dsbh(dest, src);
- dshd(dest, dest);
+ DCHECK(operand_size == 2 || operand_size == 4);
+ if (operand_size == 2) {
+ wsbh(dest, src);
+ andi(dest, dest, 0xFFFF);
} else {
- dsll32(src, src, 0);
- dsrl32(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
+ dinsu_(dest, zero_reg, 32, 32);
}
}
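Note: the rewrite drops the `dsbh`/`dshd` pairs (and the byte-sized case) in favor of `wsbh`, which swaps the bytes inside each halfword, plus `rotr` by 16, which then exchanges the halfwords; together they reverse all four bytes of a word. A host-side check of that equivalence, emulating the two instructions in plain C++:

    #include <cassert>
    #include <cstdint>

    // Emulate MIPS wsbh: swap the bytes within each 16-bit halfword.
    uint32_t wsbh(uint32_t x) {
      return ((x & 0x00FF00FFu) << 8) | ((x & 0xFF00FF00u) >> 8);
    }

    // Emulate MIPS rotr by 16: exchange the two halfwords.
    uint32_t rotr16(uint32_t x) { return (x >> 16) | (x << 16); }

    int main() {
      uint32_t v = 0x11223344u;
      // wsbh + rotr 16 == full 32-bit byte reversal (operand_size == 4).
      assert(rotr16(wsbh(v)) == 0x44332211u);
      // wsbh alone swaps the low halfword's bytes (operand_size == 2);
      // the unsigned path then masks with 0xFFFF.
      assert((wsbh(v) & 0xFFFFu) == 0x4433u);
      return 0;
    }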
@@ -1572,22 +1553,22 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
}
void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(dst, value);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
li(dst, Operand(value), mode);
}
void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupExternalReference(dst, value);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
li(dst, Operand(value), mode);
}
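Note: with `FLAG_embedded_builtins`, isolate-independent code must not embed raw heap pointers or external addresses, so both `li` overloads reroute through an indirection: the object is fetched by index from a constants table reachable off the root register. A toy model of that lookup (the data structures here are invented for illustration):

    #include <cassert>
    #include <vector>

    // Toy model: generated code holds only an index; the actual pointer
    // lives in a per-isolate table found at a fixed root-relative slot.
    struct Isolate {
      std::vector<const void*> builtins_constants_table;
    };

    const void* IndirectLoadConstant(const Isolate& isolate, int index) {
      // Equivalent of: LoadRoot(dst, table); Ld(dst, table[index]).
      return isolate.builtins_constants_table[index];
    }

    int main() {
      int heap_object = 42;  // stand-in for a Handle<HeapObject>
      Isolate isolate{{&heap_object}};
      assert(IndirectLoadConstant(isolate, 0) == &heap_object);
      return 0;
    }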
@@ -2141,11 +2122,14 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
mfc1(t8, fs);
Cvt_d_uw(fd, t8);
}
void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
@@ -2157,12 +2141,14 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
}
void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_d_ul(fd, t8);
}
void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
@@ -2190,12 +2176,14 @@ void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
}
void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_s_uw(fd, t8);
}
void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
@@ -2207,12 +2195,14 @@ void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
}
void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_s_ul(fd, t8);
}
void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
@@ -2263,6 +2253,7 @@ void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Trunc_l_ud(FPURegister fd,
FPURegister fs,
FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Load to GPR.
dmfc1(t8, fs);
// Reset sign bit.
@@ -2868,6 +2859,7 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
const Operand& rt, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
mov(rd, zero_reg);
@@ -3085,6 +3077,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
srl(scratch, rs, 1);
@@ -3113,6 +3106,7 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) {
uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
dsrl(scratch, rs, 1);
@@ -3202,6 +3196,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
Label* done) {
DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = t9;
@@ -3224,7 +3219,8 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DoubleRegister double_input) {
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -3234,7 +3230,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
Ld(result, MemOperand(sp, 0));
Daddu(sp, sp, Operand(kDoubleSize));
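Note: the new `stub_mode` parameter lets wasm code reach the `kDoubleToI` runtime stub via a `WASM_STUB_CALL` reloc instead of embedding a code-object target. Either callee performs the same truncation; a host-side sketch of the semantic, assuming ECMAScript ToInt32-style modular wrapping for out-of-range inputs:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Truncate toward zero, then wrap modulo 2^32 (ToInt32-style).
    int32_t DoubleToInt32(double value) {
      if (!std::isfinite(value)) return 0;
      double wrapped = std::fmod(std::trunc(value), 4294967296.0);
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }

    int main() {
      assert(DoubleToInt32(1.9) == 1);
      assert(DoubleToInt32(-1.9) == -1);
      assert(DoubleToInt32(4294967296.0 + 5.0) == 5);  // wraps mod 2^32
      return 0;
    }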
@@ -3395,6 +3395,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
// Be careful to always use shifted_branch_offset only just before the
@@ -3609,6 +3610,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
if (!is_near(L, OffsetSize::kOffset16)) return false;
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
int32_t offset32;
@@ -4118,54 +4120,28 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
-
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
- Ld(destination, FieldMemOperand(destination, FixedArray::kHeaderSize +
- index * kPointerSize));
+ Ld(destination,
+ FieldMemOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ld(destination, MemOperand(kRootRegister, offset));
+}
- Ld(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Daddu(destination, kRootRegister, Operand(offset));
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
@@ -4200,9 +4176,12 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
// The first instruction of 'li' may be placed in the delay slot.
// This is not an issue, t9 is expected to be clobbered anyway.
- li(t9, Operand(target, rmode));
- Jump(t9, al, zero_reg, Operand(zero_reg), bd);
- bind(&skip);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t9, Operand(target, rmode));
+ Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ bind(&skip);
+ }
}
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -4215,14 +4194,27 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(t9, code);
- Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(t9, cond, rs, rt, bd);
- return;
+ if (FLAG_embedded_builtins) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(t9, code);
+ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Jump(t9, cond, rs, rt, bd);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t9, cond, rs, rt, bd);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
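Note: the `inline_offheap_trampolines` path avoids going through an on-heap trampoline at all: for an isolate-independent builtin, the jump target is the instruction start recorded in the embedded blob. A toy sketch of that lookup (the blob layout below is invented for illustration):

    #include <cassert>
    #include <cstdint>

    // Toy embedded blob: a table of per-builtin instruction-start addresses.
    class EmbeddedData {
     public:
      static EmbeddedData FromBlob() { return EmbeddedData(); }
      uintptr_t InstructionStartOfBuiltin(int builtin_index) const {
        static const uintptr_t kEntries[] = {0x1000, 0x2400, 0x3800};
        return kEntries[builtin_index];
      }
    };

    int main() {
      // Equivalent of: li(t9, Operand(entry, OFF_HEAP_TARGET)); Jump(t9).
      EmbeddedData d = EmbeddedData::FromBlob();
      assert(d.InstructionStartOfBuiltin(1) == 0x2400);
      return 0;
    }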
@@ -4306,14 +4298,26 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(t9, code);
- Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(t9, cond, rs, rt, bd);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(t9, code);
+ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(t9, cond, rs, rt, bd);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(t9, cond, rs, rt, bd);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4781,11 +4785,10 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register temp_reg = t0;
Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // The argument count is stored as int32_t on 64-bit platforms.
- // TODO(plind): Smi on 32-bit platforms.
- Lw(expected_reg,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ // The argument count is stored as uint16_t
+ Lhu(expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(a1, new_target, expected, actual, flag);
}
@@ -4857,6 +4860,7 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
void TurboAssembler::DaddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4886,6 +4890,7 @@ void TurboAssembler::DaddOverflow(Register dst, Register left,
void TurboAssembler::DsubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4915,6 +4920,7 @@ void TurboAssembler::DsubOverflow(Register dst, Register left,
void TurboAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4941,8 +4947,8 @@ void TurboAssembler::MulOverflow(Register dst, Register left,
xor_(overflow, overflow, scratch);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -4950,9 +4956,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
PrepareCEntryArgs(f->nargs);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, a0, a1));
+ Daddu(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -5050,18 +5056,17 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
stop(msg);
return;
}
-#endif
Move(a0, Smi::FromInt(static_cast<int>(reason)));
@@ -5106,14 +5111,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
- int stack_offset, fp_offset;
- if (type == StackFrame::INTERNAL) {
- stack_offset = -4 * kPointerSize;
- fp_offset = 2 * kPointerSize;
- } else {
- stack_offset = -3 * kPointerSize;
- fp_offset = 1 * kPointerSize;
- }
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int stack_offset = -3 * kPointerSize;
+ const int fp_offset = 1 * kPointerSize;
daddiu(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
Sd(ra, MemOperand(sp, stack_offset));
@@ -5122,14 +5122,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
stack_offset -= kPointerSize;
li(t9, Operand(StackFrame::TypeToMarker(type)));
Sd(t9, MemOperand(sp, stack_offset));
- if (type == StackFrame::INTERNAL) {
- DCHECK_EQ(stack_offset, kPointerSize);
- li(t9, CodeObject());
- Sd(t9, MemOperand(sp, 0));
- } else {
- DCHECK_EQ(stack_offset, 0);
- }
// Adjust FP to point to saved FP.
+ DCHECK_EQ(stack_offset, 0);
Daddu(fp, sp, Operand(fp_offset));
}
@@ -5189,17 +5183,20 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- // Accessed from ExitFrame::code_slot.
- li(t8, CodeObject(), CONSTANT_SIZE);
- Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Accessed from ExitFrame::code_slot.
+ li(t8, CodeObject(), CONSTANT_SIZE);
+ Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
- // Save the frame pointer and the context in top.
- li(t8,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
- Sd(fp, MemOperand(t8));
- li(t8,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- Sd(cp, MemOperand(t8));
+ // Save the frame pointer and the context in top.
+ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate()));
+ Sd(fp, MemOperand(t8));
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Sd(cp, MemOperand(t8));
+ }
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
@@ -5235,6 +5232,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -5321,10 +5319,11 @@ void MacroAssembler::AssertStackIsAligned() {
}
}
-void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre32Bits()) {
- Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
+ DCHECK(SmiValuesAre31Bits());
Lw(dst, src);
SmiUntag(dst);
}
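Note: the renamed `SmiUntag(Register, const MemOperand&)` exploits the 32-bit-Smi layout on 64-bit targets: the payload lives in the upper word of the tagged slot, so a plain `Lw` at the offset `SmiWordOffset` computes untags for free. A host-side check of that layout (this snippet assumes a little-endian host, where the upper word sits at byte offset 4):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // 32-bit Smis on a 64-bit target: the value occupies the upper 32
    // bits, the lower 32 bits are the (zero) tag.
    int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }

    int main() {
      int64_t slot = SmiTag(-7);
      // Lw from the upper word yields the untagged value directly.
      int32_t loaded;
      std::memcpy(&loaded, reinterpret_cast<const char*>(&slot) + 4,
                  sizeof(loaded));
      assert(loaded == -7);
      assert(loaded == static_cast<int32_t>(slot >> 32));  // shift-based untag
      return 0;
    }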
@@ -5395,20 +5394,9 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, t8);
- Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
- Operand(zero_reg));
- GetObjectType(object, t8, t8);
- Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
- Operand(FIXED_ARRAY_TYPE));
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
@@ -5423,6 +5411,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
@@ -5436,6 +5425,7 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
@@ -5448,6 +5438,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
@@ -5475,9 +5466,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
- LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
bind(&done_checking);
}
}
@@ -5505,10 +5496,13 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
- mfc1(t8, src1);
- dsll32(t8, t8, 0);
- Branch(&return_left, eq, t8, Operand(zero_reg));
- Branch(&return_right);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ mfc1(t8, src1);
+ dsll32(t8, t8, 0);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+ }
bind(&return_right);
if (src2 != dst) {
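Note: in the Float32/Float64 Min/Max helpers below, once the operands compare equal the only remaining ambiguity is +0 vs -0 (max must return +0, min must return -0), and FP comparison cannot tell them apart. Moving the bit pattern into a GPR (`mfc1`/`dmfc1`, with `dsll32` discarding the sign-extended upper bits in the single-precision case) makes the register zero exactly for +0. A host-side sketch of that bit test:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // +0.0f is the all-zero bit pattern; -0.0f differs only in the sign
    // bit, yet the two compare equal as floats.
    bool IsPositiveZero(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits == 0;  // what `mfc1; dsll32; beq ..., zero_reg` tests
    }

    int main() {
      assert(0.0f == -0.0f);  // comparison alone cannot disambiguate
      assert(IsPositiveZero(0.0f));
      assert(!IsPositiveZero(-0.0f));
      return 0;
    }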
@@ -5552,10 +5546,13 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
- mfc1(t8, src1);
- dsll32(t8, t8, 0);
- Branch(&return_right, eq, t8, Operand(zero_reg));
- Branch(&return_left);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ mfc1(t8, src1);
+ dsll32(t8, t8, 0);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5599,9 +5596,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_left);
// Left equals right => check for -0.
- dmfc1(t8, src1);
- Branch(&return_left, eq, t8, Operand(zero_reg));
- Branch(&return_right);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ dmfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5645,9 +5645,12 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
- dmfc1(t8, src1);
- Branch(&return_right, eq, t8, Operand(zero_reg));
- Branch(&return_left);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ dmfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5720,6 +5723,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
li(t9, function);
CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
}
@@ -5773,14 +5777,16 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function != t9) {
+ mov(t9, function);
+ function = t9;
+ }
- if (function != t9) {
- mov(t9, function);
- function = t9;
+ Call(function);
}
- Call(function);
-
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);