Diffstat (limited to 'deps/v8/src/compiler/backend/mips')
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc        | 70
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h      |  1
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc |  3
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc  |  9
4 files changed, 58 insertions(+), 25 deletions(-)
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 1f79386821..5cec4a8a16 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -80,6 +80,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
@@ -264,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -301,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
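
Note: the helper above only acts on loads marked kMemoryAccessPoisoned. A
minimal standalone sketch of the mitigation it implements, assuming the usual
V8 semantics where the speculation-poison register is all-ones on correctly
predicted paths and zero under misspeculation (PoisonedLoad and its parameter
names are illustrative, not V8 APIs):

  #include <cstdint>

  // Sketch: AND the loaded word with the poison mask so that a value
  // read on a mispredicted (speculative) path is forced to zero.
  uint32_t PoisonedLoad(const uint32_t* slot, uint32_t poison_mask) {
    return *slot & poison_mask;
  }
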
@@ -662,8 +664,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
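
Note: the rename reflects that the instruction's input is now a builtin index
rather than a raw code pointer. A hedged sketch of by-index dispatch, with
BuiltinEntry and the table parameter as illustrative stand-ins for V8's
builtins table (not the actual V8 types):

  // Sketch: the index selects an entry in a builtins table; the entry
  // address is loaded and then called indirectly.
  using BuiltinEntry = void (*)();
  inline void CallBuiltinByIndexSketch(const BuiltinEntry* table, int index) {
    table[index]();
  }
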
@@ -778,6 +780,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sw(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -785,6 +794,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
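
Note: rationale for the new kCallingPCOffset store, sketched in standalone
C++ (WasmExitFrameSketch and CallingPcOf are illustrative, not V8 types).
While native C code runs, the stack walker cannot trust the ra register, so
the generated code spills the return PC into a known exit-frame slot first,
and the safepoint bound at return_location describes that PC:

  #include <cstdint>

  struct WasmExitFrameSketch {
    uintptr_t calling_pc;  // written via sw right before CallCFunction
  };

  // A walker can then resolve the Wasm caller's PC from fp alone:
  inline uintptr_t CallingPcOf(const WasmExitFrameSketch* f) {
    return f->calling_pc;
  }
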
@@ -816,22 +827,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
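
Note: the comment in the hunk above explains the trick of claiming a frame
without building one. A minimal RAII sketch of the assumed FrameScope
semantics (FrameScopeSketch is illustrative; the real class lives in V8's
assembler layer):

  // Sketch: flip the "has frame" flag for the scope's duration without
  // emitting any frame-setup instructions; safe here because the
  // AbortCSAAssert builtin never returns.
  class FrameScopeSketch {
   public:
    explicit FrameScopeSketch(bool* has_frame)
        : has_frame_(has_frame), saved_(*has_frame) {
      *has_frame_ = true;
    }
    ~FrameScopeSketch() { *has_frame_ = saved_; }

   private:
    bool* const has_frame_;
    const bool saved_;
  };
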
@@ -1611,6 +1620,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMipsSync: {
+ __ sync();
+ break;
+ }
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
@@ -3157,7 +3170,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3376,8 +3389,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3387,7 +3406,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3397,12 +3417,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ lw(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
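
Note: worked example of the new slot arithmetic, with illustrative numbers.
If GetTotalFrameSlotCount() returns 12 and frame()->GetFixedSlotCount()
returns 4 (return address, saved fp, marker, context), then required_slots is
12 - 4 = 8 slots still to be allocated. Reading the fixed count from the
frame rather than from the call descriptor plausibly matters here because the
frame can now carry extra fixed slots, such as the PC slot reserved above for
Wasm C-API calls.
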
@@ -3564,6 +3588,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index ba64e59429..44e53ac044 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -134,6 +134,7 @@ namespace compiler {
V(MipsStackClaim) \
V(MipsSeb) \
V(MipsSeh) \
+ V(MipsSync) \
V(MipsS128Zero) \
V(MipsI32x4Splat) \
V(MipsI32x4ExtractLane) \
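
Note: the V(...) list above is a classic X-macro; each entry expands once per
use site. A compilable miniature of the pattern, assuming the same expansion
V8 uses to derive the opcode enum (all names here are illustrative):

  #define OPCODE_LIST_SKETCH(V) \
    V(MipsSeb)                  \
    V(MipsSeh)                  \
    V(MipsSync)

  enum OpcodeSketch {
  #define DECLARE(Name) k##Name,
    OPCODE_LIST_SKETCH(DECLARE)
  #undef DECLARE
  };  // yields kMipsSeb, kMipsSeh, kMipsSync
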
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 26a3e808cc..92ab3f9344 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -284,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUsh:
case kMipsUsw:
case kMipsUswc1:
+ case kMipsSync:
case kMipsWord32AtomicPairStore:
case kMipsWord32AtomicPairAdd:
case kMipsWord32AtomicPairSub:
@@ -1352,7 +1353,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 0c7299d451..452e92a174 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -274,9 +274,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1775,6 +1775,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
arraysize(temps), temps);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
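
Note: VisitMemoryBarrier lowers the machine-level memory-barrier node to the
new kMipsSync opcode, i.e. a single MIPS sync instruction acting as a full
fence. A sketch of the source-level construct that typically maps onto it
(the exact lowering is the compiler's choice):

  #include <atomic>

  // A sequentially consistent fence; on MIPS this is commonly
  // implemented with the `sync` instruction.
  void FullFenceSketch() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
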