Diffstat (limited to 'deps/v8/src/mips/macro-assembler-mips.cc')
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 338
1 file changed, 206 insertions(+), 132 deletions(-)
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c10602df48..e0de62e1da 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -6,41 +6,33 @@
 #if V8_TARGET_ARCH_MIPS
 
+#include "src/assembler-inl.h"
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/callable.h"
 #include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
 #include "src/debug/debug.h"
 #include "src/external-reference-table.h"
 #include "src/frames-inl.h"
-#include "src/instruction-stream.h"
-#include "src/mips/assembler-mips-inl.h"
-#include "src/mips/macro-assembler-mips.h"
+#include "src/macro-assembler.h"
+#include "src/objects/heap-number.h"
 #include "src/register-configuration.h"
 #include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
 #include "src/snapshot/snapshot.h"
 #include "src/wasm/wasm-code-manager.h"
 
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/mips/macro-assembler-mips.h"
+#endif
+
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(Isolate* isolate,
-                               const AssemblerOptions& options, void* buffer,
-                               int size, CodeObjectRequired create_code_object)
-    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
-  if (create_code_object == CodeObjectRequired::kYes) {
-    // Unlike TurboAssembler, which can be used off the main thread and may not
-    // allocate, macro assembler creates its own copy of the self-reference
-    // marker in order to disambiguate between self-references during nested
-    // code generation (e.g.: codegen of the current object triggers stub
-    // compilation through CodeStub::GetCode()).
-    code_object_ = Handle<HeapObject>::New(
-        *isolate->factory()->NewSelfReferenceMarker(), isolate);
-  }
-}
-
 static inline bool IsZero(const Operand& rt) {
   if (rt.is_reg()) {
     return rt.rm() == zero_reg;
@@ -128,14 +120,16 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
 }
 
 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
-  lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
+  lw(destination,
+     MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
 }
 
 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                               Condition cond, Register src1,
                               const Operand& src2) {
   Branch(2, NegateCondition(cond), src1, src2);
-  lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
+  lw(destination,
+     MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
 }
 
@@ -259,24 +253,42 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
 void TurboAssembler::CallRecordWriteStub(
     Register object, Register address,
     RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+  CallRecordWriteStub(
+      object, address, remembered_set_action, fp_mode,
+      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+      kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+    Register object, Register address,
+    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+    Address wasm_target) {
+  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+                      Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+    Register object, Register address,
+    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+    Handle<Code> code_target, Address wasm_target) {
+  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
   // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
   // i.e. always emit remember set and save FP registers in RecordWriteStub. If
   // large performance regression is observed, we should use these values to
   // avoid unnecessary work.
-  Callable const callable =
-      Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
-  RegList registers = callable.descriptor().allocatable_registers();
+  RecordWriteDescriptor descriptor;
+  RegList registers = descriptor.allocatable_registers();
 
   SaveRegisters(registers);
-  Register object_parameter(callable.descriptor().GetRegisterParameter(
-      RecordWriteDescriptor::kObject));
+  Register object_parameter(
+      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
   Register slot_parameter(
-      callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
-  Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
-      RecordWriteDescriptor::kRememberedSet));
-  Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
-      RecordWriteDescriptor::kFPMode));
+      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+  Register remembered_set_parameter(
+      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+  Register fp_mode_parameter(
+      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
 
   Push(object);
   Push(address);
@@ -286,7 +298,11 @@ void TurboAssembler::CallRecordWriteStub(
   Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
   Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
 
-  Call(callable.code(), RelocInfo::CODE_TARGET);
+  if (code_target.is_null()) {
+    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+  } else {
+    Call(code_target, RelocInfo::CODE_TARGET);
+  }
 
   RestoreRegisters(registers);
 }
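Two of the overloads above exist only to funnel into the third: a caller supplies either a Code handle or a raw wasm stub address, never both, and the DCHECK_NE encodes that as an exclusive-or over the two null checks. A minimal sketch of the pattern, using hypothetical stand-ins (CodeHandle, CallRecordWrite) rather than the real V8 types:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for V8's Handle<Code> and Address; only the
    // null checks matter for the pattern.
    using Address = uintptr_t;
    constexpr Address kNullAddress = 0;
    struct CodeHandle {
      const void* ptr = nullptr;
      bool is_null() const { return ptr == nullptr; }
    };

    // The worker takes both targets and insists exactly one is set, mirroring
    // DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress).
    void CallRecordWrite(CodeHandle code_target, Address wasm_target) {
      assert(code_target.is_null() != (wasm_target == kNullAddress));
      if (code_target.is_null()) {
        // ... emit a WASM_STUB_CALL to wasm_target ...
      } else {
        // ... emit a CODE_TARGET call to code_target ...
      }
    }

    // The public overloads each supply one target and null out the other.
    void CallRecordWrite(CodeHandle code_target) {
      CallRecordWrite(code_target, kNullAddress);
    }
    void CallRecordWrite(Address wasm_target) {
      CallRecordWrite(CodeHandle{}, wasm_target);
    }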
@@ -1724,8 +1740,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
   {
     UseScratchRegisterScope temps(this);
     Register scratch = temps.Acquire();
-    Subu(scratch, pos, Operand(32));
-    Neg(scratch, Operand(scratch));
+    Subu(scratch, zero_reg, pos);
     Ror(dest, dest, scratch);
   }
 }
@@ -3644,8 +3659,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
 
 void TurboAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
-  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
-      RootIndex::kBuiltinsConstantsTable));
+  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
   lw(destination,
      FieldMemOperand(destination,
@@ -3795,23 +3809,38 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
+    int builtin_index = Builtins::kNoBuiltinId;
+    bool target_is_isolate_independent_builtin =
+        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+        Builtins::IsIsolateIndependent(builtin_index);
+    if (target_is_isolate_independent_builtin &&
+        options().use_pc_relative_calls_and_jumps) {
+      int32_t code_target_index = AddCodeTarget(code);
+      Label skip;
+      BlockTrampolinePoolScope block_trampoline_pool(this);
+      if (cond != cc_always) {
+        // By using delay slot, we always execute first instruction of
+        // GenPcRelativeJump (which is or_(t8, ra, zero_reg)).
+        Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+      }
+      GenPCRelativeJump(t8, t9, code_target_index,
+                        RelocInfo::RELATIVE_CODE_TARGET, bd);
+      bind(&skip);
+      return;
+    } else if (root_array_available_ && options().isolate_independent_code) {
       IndirectLoadConstant(t9, code);
       Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
       return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Jump(t9, 0, cond, rs, rt, bd);
-        return;
-      }
+    } else if (target_is_isolate_independent_builtin &&
+               options().inline_offheap_trampolines) {
+      // Inline the trampoline.
+      RecordCommentForOffHeapTrampoline(builtin_index);
+      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+      EmbeddedData d = EmbeddedData::FromBlob();
+      Address entry = d.InstructionStartOfBuiltin(builtin_index);
+      li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+      Jump(t9, 0, cond, rs, rt, bd);
+      return;
     }
   }
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
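The new PC-relative path above, like the long-branch rewrite further down, delegates to GenPCRelativeJump, which materializes a 32-bit offset with a lui/ori pair and adds it to the PC captured by nal. A standalone sketch of that hi/lo split; the constants mirror what V8's kImm16Mask/kHiMask/kLuiShift are assumed to be:

    #include <cassert>
    #include <cstdint>

    // Assumed constants mirroring V8's kImm16Mask/kHiMask/kLuiShift.
    constexpr uint32_t kImm16Mask = 0x0000FFFF;
    constexpr uint32_t kHiMask = 0xFFFF0000;
    constexpr int kLuiShift = 16;

    int main() {
      int32_t imm32 = -0x12344;  // an example backward branch offset
      // lui writes the upper halfword; ori ORs in the lower one.
      uint32_t hi = (static_cast<uint32_t>(imm32) & kHiMask) >> kLuiShift;
      uint32_t lo = static_cast<uint32_t>(imm32) & kImm16Mask;
      // Because ori zero-extends (unlike addiu, which sign-extends), the
      // pair reassembles the offset bit-for-bit with no carry fix-up.
      int32_t rebuilt = static_cast<int32_t>((hi << kLuiShift) | lo);
      assert(rebuilt == imm32);
      return 0;
    }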
@@ -3902,23 +3931,36 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
+    int builtin_index = Builtins::kNoBuiltinId;
+    bool target_is_isolate_independent_builtin =
+        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+        Builtins::IsIsolateIndependent(builtin_index);
+    if (target_is_isolate_independent_builtin &&
+        options().use_pc_relative_calls_and_jumps) {
+      int32_t code_target_index = AddCodeTarget(code);
+      Label skip;
+      BlockTrampolinePoolScope block_trampoline_pool(this);
+      if (cond != cc_always) {
+        Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
+      }
+      GenPCRelativeJumpAndLink(t8, code_target_index,
+                               RelocInfo::RELATIVE_CODE_TARGET, bd);
+      bind(&skip);
+      return;
+    } else if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(t9, code);
      Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
      return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Call(t9, 0, cond, rs, rt, bd);
-        return;
-      }
+    } else if (target_is_isolate_independent_builtin &&
+               options().inline_offheap_trampolines) {
+      // Inline the trampoline.
+      RecordCommentForOffHeapTrampoline(builtin_index);
+      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+      EmbeddedData d = EmbeddedData::FromBlob();
+      Address entry = d.InstructionStartOfBuiltin(builtin_index);
+      li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+      Call(t9, 0, cond, rs, rt, bd);
+      return;
     }
   }
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -3926,6 +3968,57 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   Call(code.address(), rmode, cond, rs, rt, bd);
 }
 
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+  STATIC_ASSERT(kSystemPointerSize == 4);
+  STATIC_ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+
+  // The builtin_pointer register contains the builtin index as a Smi.
+  SmiUntag(builtin_pointer, builtin_pointer);
+  Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
+  lw(builtin_pointer,
+     MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
+  Call(builtin_pointer);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+  // This generates the final instruction sequence for calls to C functions
+  // once an exit frame has been constructed.
+  //
+  // Note that this assumes the caller code (i.e. the Code object currently
+  // being generated) is immovable or that the callee function cannot trigger
+  // GC, since the callee function will return to it.
+
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+  static constexpr int kNumInstructionsToJump = 4;
+  Label find_ra;
+  // Adjust the value in ra to point to the correct return location, 2nd
+  // instruction past the real call into C code (the jalr(t9)), and push it.
+  // This is the return address of the exit frame.
+  if (kArchVariant >= kMips32r6) {
+    addiupc(ra, kNumInstructionsToJump + 1);
+  } else {
+    // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS.
+    nal();  // nal has branch delay slot.
+    Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
+  }
+  bind(&find_ra);
+
+  // This spot was reserved in EnterExitFrame.
+  sw(ra, MemOperand(sp));
+  // Stack space reservation moved to the branch delay slot below.
+  // Stack is still aligned.
+
+  // Call the C routine.
+  mov(t9, target);  // Function pointer to t9 to conform to ABI for PIC.
+  jalr(t9);
+  // Set up sp in the delay slot.
+  addiu(sp, sp, -kCArgsSlotsSize);
+  // Make sure the stored 'ra' points to this position.
+  DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
+
 void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
   Jump(ra, 0, cond, rs, rt, bd);
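CallBuiltinPointer turns a Smi-tagged builtin index into a code entry address: untag (an arithmetic shift right by one, given the STATIC_ASSERTs above), scale by the pointer size via Lsa, and load from the builtin entry table addressed off kRootRegister. A sketch of just the address arithmetic; the table offset here is a made-up stand-in for IsolateData::builtin_entry_table_offset():

    #include <cassert>
    #include <cstdint>

    // Hypothetical table offset; the real value comes from
    // IsolateData::builtin_entry_table_offset().
    constexpr intptr_t kBuiltinEntryTableOffset = 0x100;
    constexpr int kSystemPointerSizeLog2 = 2;  // 4-byte pointers

    intptr_t BuiltinEntrySlot(intptr_t root_register, int32_t builtin_smi) {
      int32_t index = builtin_smi >> 1;  // SmiUntag: drop the one-bit tag
      // Lsa(dst, base, index, sa) computes base + (index << sa).
      intptr_t scaled = root_register + (index << kSystemPointerSizeLog2);
      // The lw then reads the builtin's entry point at this address.
      return scaled + kBuiltinEntryTableOffset;
    }

    int main() {
      // Builtin index 3, Smi-tagged by shifting in a zero tag bit.
      assert(BuiltinEntrySlot(0x1000, 3 << 1) == 0x1000 + 0x100 + 3 * 4);
      return 0;
    }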
@@ -3940,17 +4033,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
     int32_t imm32;
     imm32 = branch_long_offset(L);
-    or_(t8, ra, zero_reg);
-    nal();                                    // Read PC into ra register.
-    lui(t9, (imm32 & kHiMask) >> kLuiShift);  // Branch delay slot.
-    ori(t9, t9, (imm32 & kImm16Mask));
-    addu(t9, ra, t9);
-    if (bdslot == USE_DELAY_SLOT) {
-      or_(ra, t8, zero_reg);
-    }
-    jr(t9);
-    // Emit a or_ in the branch delay slot if it's protected.
-    if (bdslot == PROTECT) or_(ra, t8, zero_reg);
+    GenPCRelativeJump(t8, t9, imm32, RelocInfo::NONE, bdslot);
   }
 }
 
@@ -3963,13 +4046,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
     int32_t imm32;
     imm32 = branch_long_offset(L);
-    lui(t8, (imm32 & kHiMask) >> kLuiShift);
-    nal();                              // Read PC into ra register.
-    ori(t8, t8, (imm32 & kImm16Mask));  // Branch delay slot.
-    addu(t8, ra, t8);
-    jalr(t8);
-    // Emit a nop in the branch delay slot if required.
-    if (bdslot == PROTECT) nop();
+    GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NONE, bdslot);
   }
 }
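The deleted bodies spell out what the shared GenPCRelativeJump/GenPCRelativeJumpAndLink helpers now encapsulate: nal links the address past its delay slot (pc + 8) into ra without branching, the lui/ori pair (one of which sits in that delay slot) builds imm32, and an addu forms the jump target. A sketch of the resulting target computation, assuming the usual pc + 8 link semantics of nal:

    #include <cassert>
    #include <cstdint>

    // nal links pc + 8 (the address past its delay slot) into ra without
    // branching; lui/ori build imm32; addu forms the target.
    uint32_t LongBranchTarget(uint32_t nal_pc, int32_t imm32) {
      uint32_t ra = nal_pc + 8;  // link register contents after nal
      return ra + static_cast<uint32_t>(imm32);
    }

    int main() {
      // A backward long branch: 64 bytes before the link point (the pc
      // value is made up for the example).
      assert(LongBranchTarget(0x4000, -64) == 0x4000 + 8 - 64);
      return 0;
    }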
@@ -4039,7 +4116,7 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
   push(scratch);
 }
 
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   li(scratch, Operand(smi));
@@ -4062,7 +4139,7 @@ void MacroAssembler::PushStackHandler() {
   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
 
-  Push(Smi::kZero);  // Padding.
+  Push(Smi::zero());  // Padding.
 
   // Link the current handler as the next handler.
   li(t2,
@@ -4426,39 +4503,6 @@ void MacroAssembler::GetObjectType(Register object,
 // -----------------------------------------------------------------------------
 // Runtime calls.
 
-void MacroAssembler::CallStub(CodeStub* stub,
-                              Condition cond,
-                              Register r1,
-                              const Operand& r2,
-                              BranchDelaySlot bd) {
-  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
-                                     Register r1, const Operand& r2,
-                                     BranchDelaySlot bd) {
-  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  UseScratchRegisterScope temps(this);
-  Register scratch = temps.Acquire();
-  li(scratch, Operand::EmbeddedCode(stub));
-  Call(scratch);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub,
-                                  Condition cond,
-                                  Register r1,
-                                  const Operand& r2,
-                                  BranchDelaySlot bd) {
-  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
-  return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
 void TurboAssembler::AddOverflow(Register dst, Register left,
                                  const Operand& right, Register overflow) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4606,7 +4650,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
 
 void MacroAssembler::LoadWeakValue(Register out, Register in,
                                    Label* target_if_cleared) {
-  Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
+  Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
 
   And(out, in, Operand(~kWeakHeapObjectMask));
 }
@@ -4741,19 +4785,6 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
   lw(fp, MemOperand(fp, 0 * kPointerSize));
 }
 
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
-                                       Register argc) {
-  Push(ra, fp);
-  Move(fp, sp);
-  Push(context, target, argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
-                                       Register argc) {
-  Pop(context, target, argc);
-  Pop(ra, fp);
-}
-
 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                     StackFrame::Type frame_type) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4822,7 +4853,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
   }
 
   // Reserve place for the return address, stack space and an optional slot
-  // (used by the DirectCEntryStub to hold the return value if a struct is
+  // (used by DirectCEntry to hold the return value if a struct is
   // returned) and align the frame preparing for calling the runtime function.
   DCHECK_GE(stack_space, 0);
   Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
@@ -5041,6 +5072,9 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
   // Check if JSGeneratorObject
   Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
 
+  // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+  Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
   // Check if JSAsyncGeneratorObject
   Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
 
@@ -5379,7 +5413,36 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
     function_offset = 0;
   }
 
+  // Save the frame pointer and PC so that the stack layout remains iterable,
+  // even without an ExitFrame which normally exists between JS and C frames.
+  if (isolate() != nullptr) {
+    // 't' registers are caller-saved so this is safe as a scratch register.
+    Register scratch1 = t4;
+    Register scratch2 = t5;
+    DCHECK(!AreAliased(scratch1, scratch2, function_base));
+
+    Label get_pc;
+    mov(scratch1, ra);
+    Call(&get_pc);
+
+    bind(&get_pc);
+    mov(scratch2, ra);
+    mov(ra, scratch1);
+
+    li(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+    sw(scratch2, MemOperand(scratch1));
+    li(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+    sw(fp, MemOperand(scratch1));
+  }
+
   Call(function_base, function_offset);
+
+  if (isolate() != nullptr) {
+    // We don't unset the PC; the FP is the source of truth.
+    Register scratch = t4;
+    li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+    sw(zero_reg, MemOperand(scratch));
+  }
 
   int stack_passed_arguments = CalculateStackPassedWords(
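The bookkeeping added around Call(function_base, function_offset) publishes the caller's PC and FP in isolate-owned slots so that a sampling profiler can walk the stack through a C frame that has no ExitFrame; only the FP slot is cleared afterwards, since a non-zero FP is what marks a fast C call as in progress. A minimal model of the protocol, with a plain struct standing in for the slots behind ExternalReference::fast_c_call_caller_{pc,fp}_address():

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the isolate-owned fast-C-call slots.
    struct FastCCallSlots {
      uintptr_t caller_pc = 0;
      uintptr_t caller_fp = 0;
    };

    template <typename Fn>
    void CallCWithIterableStack(FastCCallSlots& slots, uintptr_t caller_pc,
                                uintptr_t caller_fp, Fn fn) {
      // Publish PC and FP so a sampler interrupting the C callee can still
      // walk the stack above this frame.
      slots.caller_pc = caller_pc;
      slots.caller_fp = caller_fp;
      fn();
      // Only FP is reset afterwards; as the comment in the hunk says, the
      // FP slot is the source of truth for "a fast C call is in progress".
      slots.caller_fp = 0;
    }

    int main() {
      FastCCallSlots slots;
      CallCWithIterableStack(slots, 0x1234, 0x5678,
                             [] { std::puts("in C callee"); });
      return static_cast<int>(slots.caller_fp);  // 0: cleared after the call
    }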
@@ -5451,6 +5514,17 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
   li(kSpeculationPoisonRegister, -1);
 }
 
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+  NoRootArrayScope no_root_array(this);
+
+  // Save the deopt id in kRootRegister (we don't need the roots array from now
+  // on).
+  DCHECK_LE(deopt_id, 0xFFFF);
+  li(kRootRegister, deopt_id);
+
+  Call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
 }  // namespace internal
 }  // namespace v8