Diffstat (limited to 'deps/v8/src/compiler/ppc/code-generator-ppc.cc')
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc  112
1 file changed, 79 insertions, 33 deletions
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 45cd95a9e0..fd2b2eefdb 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -74,6 +74,10 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return Operand(constant.ToInt64());
#endif
case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -513,11 +517,11 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
/* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */ \
/* being different registers is most efficiently expressed */ \
/* as -((-L) - R). */ \
- __ fneg(left_reg, left_reg); \
- if (left_reg == right_reg) { \
- __ fadd(result_reg, left_reg, right_reg); \
+ __ fneg(kScratchDoubleReg, left_reg); \
+ if (kScratchDoubleReg == right_reg) { \
+ __ fadd(result_reg, kScratchDoubleReg, right_reg); \
} else { \
- __ fsub(result_reg, left_reg, right_reg); \
+ __ fsub(result_reg, kScratchDoubleReg, right_reg); \
} \
__ fneg(result_reg, result_reg); \
__ b(&done); \
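
The Min comment above leans on a signed-zero identity: when both inputs are +/-0, IEEE-754 addition of the negated operands yields the sign that min() must return, and since (-L) + (-R) == (-L) - R the sequence can use fsub when the operands live in different registers. Below is a minimal standalone C++ sketch of that identity, not part of the patch; MinOfZeros is a made-up helper name for illustration only.

#include <cmath>
#include <cstdio>

// Only meaningful when both inputs are +0.0 or -0.0, which is the case the
// macro reaches this sequence for.
static double MinOfZeros(double l, double r) {
  return -((-l) - r);  // same result as -((-l) + (-r)) for zero inputs
}

int main() {
  std::printf("%d\n", std::signbit(MinOfZeros(-0.0, +0.0)));  // 1 -> -0.0
  std::printf("%d\n", std::signbit(MinOfZeros(+0.0, +0.0)));  // 0 -> +0.0
  std::printf("%d\n", std::signbit(MinOfZeros(-0.0, -0.0)));  // 1 -> -0.0
  return 0;
}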
@@ -660,15 +664,15 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
__ bne(&exchange, cr0); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(i.InputRegister(2), operand); \
- __ bne(&binop, cr0); \
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
+ do { \
+ MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ Label binop; \
+ __ bind(&binop); \
+ __ load_inst(i.OutputRegister(), operand); \
+ __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
+ __ store_inst(kScratchReg, operand); \
+ __ bne(&binop, cr0); \
} while (false)
#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, \
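
The ASSEMBLE_ATOMIC_BINOP change above matters because the load-reserved/store-conditional loop can retry: if the store-conditional fails, the body runs again, so combining the result into i.InputRegister(2) would feed a stale value into the next iteration, while kScratchReg keeps the original operand intact. A rough standalone C++ analogue of the same shape follows, not part of the patch; AtomicAdd and the variable names are invented, and a compare_exchange_weak retry loop stands in for load-reserved/store-conditional.

#include <atomic>
#include <cstdio>

// Returns the previously stored value, mirroring how the macro leaves the
// loaded value in i.OutputRegister().
static int AtomicAdd(std::atomic<int>& cell, int operand) {
  int old_value = cell.load();
  int scratch;
  do {
    scratch = old_value + operand;  // combine into scratch, never into operand
  } while (!cell.compare_exchange_weak(old_value, scratch));
  return old_value;
}

int main() {
  std::atomic<int> cell{40};
  int old_value = AtomicAdd(cell, 2);
  std::printf("old=%d new=%d\n", old_value, cell.load());  // old=40 new=42
  return 0;
}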
@@ -691,7 +695,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
Label exit; \
__ bind(&loop); \
__ load_inst(i.OutputRegister(), operand); \
- __ cmp_inst(i.OutputRegister(), i.InputRegister(2)); \
+ __ cmp_inst(i.OutputRegister(), i.InputRegister(2), cr0); \
__ bne(&exit, cr0); \
__ store_inst(i.InputRegister(3), operand); \
__ bne(&loop, cr0); \
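
The compare-exchange macro above follows the usual PPC pattern: load-reserved, bail out to the exit label when the loaded value differs from the expected one (hence the explicit cr0 operand on the compare, which the following bne tests), otherwise store-conditional the replacement and retry if the reservation was lost. In portable C++ the same behaviour is roughly what a single compare_exchange_strong call provides; the sketch below is not part of the patch.

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> cell{1};
  int expected = 1;  // plays the role of i.InputRegister(2)
  bool swapped = cell.compare_exchange_strong(expected, 2);  // 2 ~ i.InputRegister(3)
  // On failure, 'expected' is rewritten with the observed value, much like the
  // macro leaves the loaded value in i.OutputRegister().
  std::printf("swapped=%d observed=%d now=%d\n", swapped, expected, cell.load());
  return 0;  // prints: swapped=1 observed=1 now=2
}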
@@ -1975,32 +1979,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
break;
+ case kPPC_Word64AtomicLoadUint8:
case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
break;
case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
break;
+ case kPPC_Word64AtomicLoadUint16:
case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
break;
+ case kPPC_Word64AtomicLoadUint32:
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
break;
+ case kPPC_Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ld, ldx);
+ break;
+ case kPPC_Word64AtomicStoreUint8:
case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
break;
+ case kPPC_Word64AtomicStoreUint16:
case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
break;
+ case kPPC_Word64AtomicStoreUint32:
case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
break;
+ case kPPC_Word64AtomicStoreUint64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(std, stdx);
+ break;
case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
+ case kPPC_Word64AtomicExchangeUint8:
case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
@@ -2008,44 +2025,57 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
+ case kPPC_Word64AtomicExchangeUint16:
case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
break;
+ case kPPC_Word64AtomicExchangeUint32:
case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
break;
-
+ case kPPC_Word64AtomicExchangeUint64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
+ break;
case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb);
break;
+ case kPPC_Word64AtomicCompareExchangeUint8:
case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx);
break;
case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lharx, sthcx, extsh);
break;
+ case kPPC_Word64AtomicCompareExchangeUint16:
case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx);
break;
+ case kPPC_Word64AtomicCompareExchangeUint32:
case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx);
break;
-
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
+ case kPPC_Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, ldarx, stdcx);
+ break;
+
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
+ break; \
+ case kPPC_Word64Atomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
+ break; \
+ case kPPC_Word64Atomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
+ break; \
+ case kPPC_Word64Atomic##op##Uint32: \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2054,6 +2084,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
+#define ATOMIC64_BINOP_CASE(op, inst) \
+ case kPPC_Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
+ break;
+ ATOMIC64_BINOP_CASE(Add, add)
+ ATOMIC64_BINOP_CASE(Sub, sub)
+ ATOMIC64_BINOP_CASE(And, and_)
+ ATOMIC64_BINOP_CASE(Or, orx)
+ ATOMIC64_BINOP_CASE(Xor, xor_)
+#undef ATOMIC64_BINOP_CASE
+
case kPPC_ByteRev32: {
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
@@ -2118,7 +2159,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
+ condition == kOverflow || condition == kNotOverflow) {
return;
}
@@ -2564,9 +2606,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
break;
+ case Constant::kDelayedStringConstant:
+ __ mov(dst, Operand::EmbeddedStringConstant(
+ src.ToDelayedStringConstant()));
+ break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {