Diffstat (limited to 'deps/v8/src/compiler/x64/instruction-selector-x64.cc')
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 456
1 file changed, 319 insertions(+), 137 deletions(-)
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index f46ff5946d..798d438e25 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -22,6 +22,7 @@ class X64OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
+ case IrOpcode::kRelocatableInt32Constant:
return true;
case IrOpcode::kInt64Constant: {
const int64_t value = OpParameter<int64_t>(node);
@@ -36,6 +37,15 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
+ int32_t GetImmediateIntegerValue(Node* node) {
+ DCHECK(CanBeImmediate(node));
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node);
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return static_cast<int32_t>(OpParameter<int64_t>(node));
+ }
+
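
A standalone sketch, not part of this diff, of why the narrowing cast in GetImmediateIntegerValue is safe: CanBeImmediate() only accepts an Int64Constant whose value already fits in 32 bits, so the truncation is lossless.

    #include <cassert>
    #include <cstdint>

    // Mirrors the CanBeImmediate()/GetImmediateIntegerValue() contract:
    // the 64-bit constant was pre-checked to round-trip through int32_t.
    int32_t ToImm32(int64_t value) {
      assert(value == static_cast<int32_t>(value));
      return static_cast<int32_t>(value);
    }
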
bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
int effect_level) {
if (input->opcode() != IrOpcode::kLoad ||
@@ -69,6 +79,7 @@ class X64OperandGenerator final : public OperandGenerator {
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
Node* base, Node* displacement,
+ DisplacementMode displacement_mode,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
@@ -78,7 +89,9 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = UseImmediate(displacement);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
mode = kMRnI_modes[scale_exponent];
@@ -91,7 +104,9 @@ class X64OperandGenerator final : public OperandGenerator {
if (displacement == nullptr) {
mode = kMode_MR;
} else {
- inputs[(*input_count)++] = UseImmediate(displacement);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
mode = kMode_MRI;
}
}
@@ -100,7 +115,9 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = UseImmediate(displacement);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
mode = kMnI_modes[scale_exponent];
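
What the threaded-through DisplacementMode buys, as a standalone sketch (the function name is illustrative; the enum values mirror the diff): a matched `base - constant` address can reuse the `base + constant` path by negating the immediate, which is what UseNegatedImmediate() does above.

    #include <cstdint>

    enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };

    // The effective address differs only in the sign applied to the
    // displacement; the addressing mode (MRI etc.) stays the same.
    int64_t EffectiveAddress(int64_t base, int32_t disp, DisplacementMode mode) {
      return mode == kNegativeDisplacement ? base - disp : base + disp;
    }
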
@@ -120,11 +137,12 @@ class X64OperandGenerator final : public OperandGenerator {
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
InstructionOperand inputs[],
size_t* input_count) {
- BaseWithIndexAndDisplacement64Matcher m(operand, true);
+ BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
- return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
- m.displacement(), inputs, input_count);
+ return GenerateMemoryOperandInputs(
+ m.index(), m.scale(), m.base(), m.displacement(),
+ m.displacement_mode(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -160,6 +178,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kX64Movq;
@@ -245,6 +265,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kX64Movq;
@@ -268,6 +290,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+// Architecture supports unaligned access, therefore VisitLoad is used instead
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// Architecture supports unaligned access, therefore VisitStore is used instead
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -297,6 +324,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -350,6 +379,8 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -431,7 +462,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -541,16 +572,16 @@ void VisitWord64Shift(InstructionSelector* selector, Node* node,
}
}
-
void EmitLea(InstructionSelector* selector, InstructionCode opcode,
Node* result, Node* index, int scale, Node* base,
- Node* displacement) {
+ Node* displacement, DisplacementMode displacement_mode) {
X64OperandGenerator g(selector);
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode mode = g.GenerateMemoryOperandInputs(
- index, scale, base, displacement, inputs, &input_count);
+ AddressingMode mode =
+ g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+ displacement_mode, inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
@@ -571,7 +602,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
+ kPositiveDisplacement);
return;
}
VisitWord32Shift(this, node, kX64Shl32);
@@ -580,15 +612,25 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord64Shl(Node* node) {
X64OperandGenerator g(this);
- Int64BinopMatcher m(node);
- if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
- // There's no need to sign/zero-extend to 64-bit if we shift out the upper
- // 32 bits anyway.
- Emit(kX64Shl, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()->InputAt(0)),
- g.UseImmediate(m.right().node()));
+ Int64ScaleMatcher m(node, true);
+ if (m.matches()) {
+ Node* index = node->InputAt(0);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
+ kPositiveDisplacement);
return;
+ } else {
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() ||
+ m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63)) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kX64Shl, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
}
VisitWord64Shift(this, node, kX64Shl);
}
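
A standalone illustration, not from the diff, of the scale trick Int64ScaleMatcher now exploits for VisitWord64Shl (the assembly in the comments is typical compiler output, not guaranteed):

    #include <cstdint>

    // x << 3 is x * 8, expressible via the scaled-index addressing mode:
    // leaq 0(,%rdi,8), %rax
    int64_t Times8(int64_t x) { return x << 3; }

    // The power_of_two_plus_one case: x * 9 = x + x*8 in one instruction,
    // leaq (%rdi,%rdi,8), %rax
    int64_t Times9(int64_t x) { return x + (x << 3); }
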
@@ -598,37 +640,19 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitWord32Shift(this, node, kX64Shr32);
}
-
-void InstructionSelector::VisitWord64Shr(Node* node) {
- VisitWord64Shift(this, node, kX64Shr);
-}
-
-
-void InstructionSelector::VisitWord32Sar(Node* node) {
- X64OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(16) && m.right().Is(16)) {
- Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
- return;
- } else if (mleft.right().Is(24) && m.right().Is(24)) {
- Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
- return;
- }
- }
- VisitWord32Shift(this, node, kX64Sar32);
-}
-
-
-void InstructionSelector::VisitWord64Sar(Node* node) {
- X64OperandGenerator g(this);
+namespace {
+bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
+ IrOpcode::kWord64Shr == node->opcode());
+ X64OperandGenerator g(selector);
Int64BinopMatcher m(node);
- if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+ if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
m.right().Is(32)) {
// Just load and sign-extend the interesting 4 bytes instead. This happens,
// for example, when we're loading and untagging SMIs.
- BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+ BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+ AddressOption::kAllowAll);
if (mleft.matches() && (mleft.displacement() == nullptr ||
g.CanBeImmediate(mleft.displacement()))) {
size_t input_count = 0;
@@ -681,16 +705,43 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
- ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
- int32_t displacement = sequence()->GetImmediate(op).ToInt32();
- *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+ int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
+ inputs[input_count - 1] =
+ ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
}
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
- InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ selector->Emit(code, 1, outputs, input_count, inputs);
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
+ VisitWord64Shift(this, node, kX64Shr);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ X64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
return;
}
}
+ VisitWord32Shift(this, node, kX64Sar32);
+}
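
The movsxwl/movsxbl peephole above works because (x << 16) >> 16 is a 16-bit sign extension (and the 24/24 pair an 8-bit one). A well-defined standalone equivalent:

    #include <cstdint>

    // Same result as (x << 16) >> 16, without shifting into the sign bit:
    // truncate to 16 bits, then sign-extend back -- i.e. movswl.
    int32_t SignExtend16(int32_t x) { return static_cast<int16_t>(x); }

    // The (x << 24) >> 24 pair is byte sign-extension -- i.e. movsbl.
    int32_t SignExtend8(int32_t x) { return static_cast<int8_t>(x); }
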
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
VisitWord64Shift(this, node, kX64Sar);
}
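
A standalone sketch of what TryMatchLoadWord64AndShiftRight emits: a 64-bit load shifted right by 32 only needs the upper four bytes, so the displacement is bumped by 4 and a narrower load is issued (movl for Shr, movsxlq for Sar). x64's little-endian layout is assumed.

    #include <cstdint>
    #include <cstring>

    // (*p >> 32) via a four-byte load at displacement + 4.
    int64_t UpperWord(const int64_t* p) {
      int32_t hi;
      std::memcpy(&hi, reinterpret_cast<const char*>(p) + 4, sizeof(hi));
      return static_cast<int64_t>(hi);  // sign-extends, like movsxlq
    }
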
@@ -734,6 +785,9 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) {
X64OperandGenerator g(this);
@@ -755,7 +809,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.matches() &&
(m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
- m.displacement());
+ m.displacement(), m.displacement_mode());
return;
}
@@ -765,6 +819,18 @@ void InstructionSelector::VisitInt32Add(Node* node) {
void InstructionSelector::VisitInt64Add(Node* node) {
+ X64OperandGenerator g(this);
+
+ // Try to match the Add to a leaq pattern
+ BaseWithIndexAndDisplacement64Matcher m(node);
+ if (m.matches() &&
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
+ EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
+ m.displacement(), m.displacement_mode());
+ return;
+ }
+
+ // No leaq pattern match, use addq
VisitBinop(this, node, kX64Add);
}
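
A standalone example of the addressing shape the new leaq matching in VisitInt64Add folds into one instruction (the assembly comment is typical output, not mandated):

    #include <cstdint>

    // base + index*scale + displacement in a single leaq:
    // leaq 8(%rdi,%rsi,4), %rax
    int64_t Lea(int64_t base, int64_t index) { return base + index * 4 + 8; }
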
@@ -804,6 +870,14 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else {
+ if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ // Turn subtractions of constant values into immediate "leaq" instructions
+ // by negating the value.
+ Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
+ return;
+ }
VisitBinop(this, node, kX64Sub);
}
}
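
The constant-subtraction path above is the same negation idea again: x - c equals x + (-c), so a leaq with a negated immediate replaces subq. Unlike subq, leaq neither clobbers flags nor forces the output to alias an input (hence DefineAsRegister rather than DefineSameAsFirst). Standalone illustration:

    #include <cstdint>

    // subq is a read-modify-write on its destination; leaq -5(%rdi), %rax
    // computes into a fresh register and leaves the flags untouched.
    int64_t MinusFive(int64_t x) { return x + (-5); }
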
@@ -838,7 +912,6 @@ void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
}
}
-
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X64OperandGenerator g(selector);
@@ -880,18 +953,27 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
+ kPositiveDisplacement);
return;
}
VisitMul(this, node, kX64Imul32);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ // TODO(mvstanton): Use Int32ScaleMatcher somehow.
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Imul32, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Imul32, &cont);
+}
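
VisitInt32MulWithOverflow feeds the overflow flag straight into a FlagsContinuation: imull sets OF when the full product does not fit in 32 bits. A standalone sketch, assuming the GCC/Clang overflow builtin:

    #include <cstdint>

    // Typically compiles to imull plus a flag read (seto or jo), the same
    // flags the kOverflow continuation consumes after kX64Imul32.
    bool MulWouldOverflow(int32_t a, int32_t b, int32_t* product) {
      return __builtin_mul_overflow(a, b, product);  // GCC/Clang builtin
    }
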
void InstructionSelector::VisitInt64Mul(Node* node) {
VisitMul(this, node, kX64Imul);
}
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitMulHigh(this, node, kX64ImulHigh32);
}
@@ -1056,7 +1138,36 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Node* const value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ MachineRepresentation rep = load_rep.representation();
+ InstructionCode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ size_t input_count = 0;
+ InstructionOperand inputs[3];
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ node->InputAt(0), inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
+ } else {
+ Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ }
}
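
The rewritten VisitChangeInt32ToInt64 folds an adjacent load into the extension, turning a load-then-movsxlq pair into a single extending load. Standalone equivalent:

    #include <cstdint>

    // One instruction, movslq (%rdi), %rax: load four bytes and
    // sign-extend to 64 bits in the same step.
    int64_t LoadAndExtend(const int32_t* p) { return *p; }
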
@@ -1113,6 +1224,12 @@ void VisitRR(InstructionSelector* selector, Node* node,
g.UseRegister(node->InputAt(0)));
}
+void VisitRRO(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+}
void VisitFloatBinop(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
@@ -1144,15 +1261,8 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRO(this, node, kSSEFloat64ToFloat32);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, node, kArchTruncateDoubleToI);
- case TruncationMode::kRoundToZero:
- return VisitRO(this, node, kSSEFloat64ToInt32);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, node, kArchTruncateDoubleToI);
}
@@ -1165,6 +1275,10 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
case IrOpcode::kWord64Shr: {
Int64BinopMatcher m(value);
if (m.right().Is(32)) {
+ if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
Emit(kX64Shr, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.TempImmediate(32));
return;
@@ -1178,6 +1292,9 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToInt32);
+}
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
X64OperandGenerator g(this);
@@ -1249,17 +1366,9 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
- X64OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
- kSSEFloat32Neg);
- return;
- }
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}
@@ -1270,16 +1379,6 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
-}
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
-}
-
-
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
@@ -1289,6 +1388,13 @@ void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat32Sqrt);
}
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRO(this, node, kSSEFloat32Max);
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRO(this, node, kSSEFloat32Min);
+}
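
Float32Max/Min (and Float64Max/Min further down) now lower through VisitRRO with fixed SSE opcodes, and the optional MachineOperatorBuilder flags disappear at the end of this diff. Operand order matters because SSE max/min are not symmetric; a standalone illustration using the intrinsic (behavior per the Intel SDM):

    #include <xmmintrin.h>

    // MAXSS returns its SECOND operand when the inputs are unordered (NaN)
    // or compare equal (e.g. -0.0 vs +0.0), so the selector must keep the
    // first input in the output register (DefineSameAsFirst in VisitRRO).
    float MaxSS(float a, float b) {
      return _mm_cvtss_f32(_mm_max_ss(_mm_set_ss(a), _mm_set_ss(b)));
    }
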
void InstructionSelector::VisitFloat64Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
@@ -1296,29 +1402,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
- X64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
- g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
- kSSEFloat64Neg);
- return;
- }
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
-
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}
@@ -1339,12 +1425,12 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
+ VisitRRO(this, node, kSSEFloat64Max);
}
void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
+ VisitRRO(this, node, kSSEFloat64Min);
}
@@ -1352,7 +1438,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat64Sqrt);
}
@@ -1402,6 +1487,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
+ g.UseFixed(node->InputAt(1), xmm1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
+ ->MarkAsCall();
+}
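
The Ieee754 helpers pin their operands to xmm0/xmm1 and mark the instruction as a call because these operations lower to C math-library routines; under the System V AMD64 ABI the first two floating-point arguments and the return value live in exactly those registers. Standalone illustration:

    #include <cmath>

    // SysV x64: x arrives in xmm0, y in xmm1, the result returns in xmm0,
    // the same registers the selector fixes above.
    double PowViaLibm(double x, double y) { return std::pow(x, y); }
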
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1411,7 +1518,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1434,7 +1541,7 @@ void InstructionSelector::EmitPrepareArguments(
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
Emit(kX64Push, g.NoOutput(), value);
@@ -1469,7 +1576,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1487,7 +1594,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1510,10 +1617,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
- Node* right) {
- if (opcode != kX64Cmp32 && opcode != kX64Test32) {
- return opcode;
- }
+ Node* right, FlagsContinuation* cont) {
// Currently, if one of the two operands is not a Load, we don't know what its
// machine representation is, so we bail out.
// TODO(epertoso): we can probably get some size information out of immediates
@@ -1523,19 +1627,39 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
}
// If the load representations don't match, both operands will be
// zero/sign-extended to 32bit.
- LoadRepresentation left_representation = LoadRepresentationOf(left->op());
- if (left_representation != LoadRepresentationOf(right->op())) {
- return opcode;
- }
- switch (left_representation.representation()) {
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8:
- return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
- case MachineRepresentation::kWord16:
- return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
- default:
- return opcode;
+ MachineType left_type = LoadRepresentationOf(left->op());
+ MachineType right_type = LoadRepresentationOf(right->op());
+ if (left_type == right_type) {
+ switch (left_type.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8: {
+ if (opcode == kX64Test32) return kX64Test8;
+ if (opcode == kX64Cmp32) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX64Cmp8;
+ }
+ break;
+ }
+ case MachineRepresentation::kWord16:
+ if (opcode == kX64Test32) return kX64Test16;
+ if (opcode == kX64Cmp32) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX64Cmp16;
+ }
+ break;
+ default:
+ break;
+ }
}
+ return opcode;
}
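
The reworked TryNarrowOpcodeSize shrinks a 32-bit cmp/test to 8 or 16 bits when both operands are narrow loads of the same type. Because the narrow compare sees raw, unextended bits, a signed condition on zero-extended (kUint32) values must be rewritten as unsigned, which is what OverwriteUnsignedIfSigned() handles. A standalone example of the shape that benefits:

    #include <cstdint>

    // Both operands are byte loads, so cmpb plus an unsigned condition
    // (setb/seta family) is equivalent to zero-extending and using cmpl.
    bool ByteLess(const uint8_t* a, const uint8_t* b) { return *a < *b; }
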
// Shared routine for multiple word compare operations.
@@ -1545,7 +1669,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- opcode = TryNarrowOpcodeSize(opcode, left, right);
+ opcode = TryNarrowOpcodeSize(opcode, left, right, cont);
// If one of the two inputs is an immediate, make sure it's on the right, or
// if one of the two inputs is a memory operand, make sure it's on the left.
@@ -1604,7 +1728,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1746,6 +1870,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX64Sub32, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX64Imul32, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX64Add, cont);
@@ -1785,14 +1912,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2033,15 +2160,63 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kX64Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX64Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX64Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
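
VisitAtomicStore emits xchg rather than a plain mov: on x64 an exchange with a memory operand is implicitly locked, which provides the store-load ordering a sequentially consistent store requires. Standalone equivalent:

    #include <atomic>
    #include <cstdint>

    // Typically compiles to xchgl %esi, (%rdi); the implicit lock makes
    // the store sequentially consistent without a separate mfence.
    void StoreSeqCst(std::atomic<int32_t>* p, int32_t v) {
      p->store(v, std::memory_order_seq_cst);
    }
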
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
if (CpuFeatures::IsSupported(POPCNT)) {
@@ -2061,6 +2236,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8