Diffstat (limited to 'deps/v8/src/compiler/mips/code-generator-mips.cc')
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 205
1 file changed, 153 insertions(+), 52 deletions(-)
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 48ef0337f2..0e45172a5b 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -34,7 +34,7 @@ namespace compiler {
// Adds Mips-specific methods to convert InstructionOperands.
-class MipsOperandConverter FINAL : public InstructionOperandConverter {
+class MipsOperandConverter final : public InstructionOperandConverter {
public:
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
@@ -110,7 +110,8 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
// The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ FrameOffset offset = linkage()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index(), frame(), 0);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -123,12 +124,12 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+class OutOfLineLoadSingle final : public OutOfLineCode {
public:
OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<float>::quiet_NaN());
}
@@ -137,12 +138,12 @@ class OutOfLineLoadSingle FINAL : public OutOfLineCode {
};
-class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
public:
OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<double>::quiet_NaN());
}
@@ -151,12 +152,12 @@ class OutOfLineLoadDouble FINAL : public OutOfLineCode {
};
-class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ mov(result_, zero_reg); }
+ void Generate() final { __ mov(result_, zero_reg); }
private:
Register const result_;
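
Note: the OutOfLineLoad* stubs above implement the fallback side of TurboFan's checked loads; when the bounds check fails, control lands here and the result register gets a neutral value instead of the machine trapping. A minimal C++ sketch of that contract (illustrative only; the helper names are made up):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Out-of-bounds FP loads produce quiet NaN, integer loads produce 0.
    float CheckedLoadFloat(const float* buf, std::size_t len, std::size_t i) {
      return i < len ? buf[i] : std::numeric_limits<float>::quiet_NaN();
    }
    std::int32_t CheckedLoadInt32(const std::int32_t* buf, std::size_t len,
                                  std::size_t i) {
      return i < len ? buf[i] : 0;  // cf. __ mov(result_, zero_reg) above
    }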
@@ -168,7 +169,7 @@ class OutOfLineRound : public OutOfLineCode {
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
// Handle rounding to zero case where sign has to be preserved.
// High bits of double input already in kScratchReg.
__ srl(at, kScratchReg, 31);
@@ -181,21 +182,21 @@ class OutOfLineRound : public OutOfLineCode {
};
-class OutOfLineTruncate FINAL : public OutOfLineRound {
+class OutOfLineTruncate final : public OutOfLineRound {
public:
OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
: OutOfLineRound(gen, result) {}
};
-class OutOfLineFloor FINAL : public OutOfLineRound {
+class OutOfLineFloor final : public OutOfLineRound {
public:
OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
: OutOfLineRound(gen, result) {}
};
-class OutOfLineCeil FINAL : public OutOfLineRound {
+class OutOfLineCeil final : public OutOfLineRound {
public:
OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
: OutOfLineRound(gen, result) {}
@@ -393,6 +394,19 @@ FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
} while (0)
+void CodeGenerator::AssembleDeconstructActivationRecord() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
+ __ LeaveFrame(StackFrame::MANUAL);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
+ __ Drop(pop_count);
+ }
+}
+
+
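
Note: AssembleDeconstructActivationRecord tears the caller's frame down before a tail-call jump so the callee reuses the caller's stack space. Roughly what the two macro calls expand to on MIPS32, assuming the conventional fp/ra frame layout (the exact LeaveFrame sequence may differ):

    // LeaveFrame(StackFrame::MANUAL):
    //   mov   sp, fp        # discard spill slots
    //   lw    ra, 4(sp)     # restore return address (assumed slot)
    //   lw    fp, 0(sp)     # restore caller's frame pointer
    //   addiu sp, sp, 8     # pop saved fp/ra
    // Drop(pop_count):
    //   addiu sp, sp, pop_count * 4   # pop receiver + JS parameters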
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
MipsOperandConverter i(this, instr);
@@ -411,6 +425,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RecordCallPosition(instr);
break;
}
+ case kArchTailCallCodeObject: {
+ AssembleDeconstructActivationRecord();
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(at);
+ }
+ break;
+ }
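
Note: in the register case the target is a tagged Code object, so the jump address is the object pointer minus the heap-object tag plus the Code header size; the addiu above folds both constants into one immediate. As plain arithmetic (the header size below is a placeholder, not V8's real layout):

    #include <cstdint>

    // entry = code - kHeapObjectTag + Code::kHeaderSize
    std::uintptr_t CodeEntry(std::uintptr_t tagged_code) {
      const std::uintptr_t kHeapObjectTag = 1;  // low bit tags heap pointers
      const std::uintptr_t kHeaderSize = 32;    // illustrative value only
      return tagged_code - kHeapObjectTag + kHeaderSize;
    }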
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -425,6 +450,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RecordCallPosition(instr);
break;
}
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ }
+
+ AssembleDeconstructActivationRecord();
+ __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(at);
+ break;
+ }
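
Note: the JSFunction variant needs no header arithmetic because JSFunction caches a raw code entry pointer at kCodeEntryOffset; once the frame is deconstructed, the generator loads that field and jumps. Conceptually:

    // entry = *(function - kHeapObjectTag + JSFunction::kCodeEntryOffset)
    // goto entry;  // frame already torn down, so this is a true tail call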
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
@@ -542,6 +580,48 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
+ case kMipsCmpS:
+ // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+ break;
+ case kMipsAddS:
+ // TODO(plind): add special case: combine mult & add.
+ __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsSubS:
+ __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMulS:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsDivS:
+ __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsModS: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result into the float result register.
+ __ MovFromFloatResult(i.OutputSingleRegister());
+ break;
+ }
+ case kMipsAbsS:
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kMipsSqrtS: {
+ __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
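
Note: kMipsModS has no single-instruction lowering, so (pending the TODO about mod_two_floats_operation) the operands are handed through the FP parameter registers to a C function. That function's contract is presumably libm's fmod, matching the external reference's name:

    #include <cmath>

    // Assumed behavior of the mod_two_doubles_operation target:
    double ModTwoDoubles(double x, double y) { return std::fmod(x, y); }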
@@ -576,6 +656,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
+ case kMipsAbsD:
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsSqrtD: {
+ __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
case kMipsFloat64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
@@ -588,10 +675,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
- case kMipsSqrtD: {
- __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- break;
- }
case kMipsCvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
@@ -682,13 +765,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Push(i.InputRegister(0));
break;
case kMipsStackClaim: {
- int words = MiscField::decode(instr->opcode());
- __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
+ __ Subu(sp, sp, Operand(i.InputInt32(0)));
break;
}
case kMipsStoreToStackSlot: {
- int slot = MiscField::decode(instr->opcode());
- __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
+ __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
break;
}
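
Note: kMipsStackClaim and kMipsStoreToStackSlot now carry their byte offsets as ordinary immediate inputs instead of a slot count packed into the opcode's MiscField. The encoding change, roughly (slot-to-byte shift assumes kPointerSizeLog2 == 2 on MIPS32):

    // Before: size baked into the opcode at selection time
    //   int words = MiscField::decode(instr->opcode());
    //   int bytes = words << 2;               // slots -> bytes
    // After: the instruction selector emits the byte offset as an
    //   immediate input and the generator reads it via i.InputInt32(...).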
case kMipsStoreWriteBarrier: {
@@ -748,6 +829,33 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
+static bool convertCondition(FlagsCondition condition, Condition& cc) {
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ return true;
+ case kNotEqual:
+ cc = ne;
+ return true;
+ case kUnsignedLessThan:
+ cc = lt;
+ return true;
+ case kUnsignedGreaterThanOrEqual:
+ cc = uge;
+ return true;
+ case kUnsignedLessThanOrEqual:
+ cc = le;
+ return true;
+ case kUnsignedGreaterThan:
+ cc = ugt;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
MipsOperandConverter i(this, instr);
@@ -781,38 +889,21 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ } else if (instr->arch_opcode() == kMipsCmpS) {
+ if (!convertCondition(branch->condition, cc)) {
+ UNSUPPORTED_COND(kMipsCmpS, branch->condition);
+ }
+ __ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+
} else if (instr->arch_opcode() == kMipsCmpD) {
- // TODO(dusmil) optimize unordered checks to use fewer instructions
- // even if we have to unfold BranchF macro.
- Label* nan = flabel;
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- nan = tlabel;
- break;
- case kUnsignedLessThan:
- cc = lt;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = ge;
- nan = tlabel;
- break;
- case kUnsignedLessThanOrEqual:
- cc = le;
- break;
- case kUnsignedGreaterThan:
- cc = gt;
- nan = tlabel;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmpD, branch->condition);
- break;
+ if (!convertCondition(branch->condition, cc)) {
+ UNSUPPORTED_COND(kMipsCmpD, branch->condition);
}
- __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
@@ -1071,9 +1162,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
break;
- case Constant::kHeapObject:
- __ li(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ lw(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
break;
+ }
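
Note: the expanded kHeapObject path avoids embedding a heap pointer when the constant can be rematerialized more cheaply: a constant already living in a frame slot (e.g. the context or closure, by assumption about the two Is... helpers) is reloaded relative to fp, and a root-list constant goes through LoadRoot. The fallthrough keeps the old li with an embedded handle:

    // Materialization choice, schematically:
    //   frame-resident constant  -> lw  dst, offset(fp)
    //   root-list constant       -> LoadRoot(dst, index)
    //   anything else            -> li  dst, <embedded handle>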
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;