Diffstat (limited to 'deps/v8/src/compiler/x64/instruction-selector-x64.cc')
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 217
1 file changed, 78 insertions(+), 139 deletions(-)
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 04fec146de..a0f14c687c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -109,7 +109,7 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = displacement_mode
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
? UseNegatedImmediate(displacement)
: UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
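Note on this hunk: the old code used displacement_mode itself as the ternary condition, so the chosen operand depended on the numeric value of the enumerators rather than on the stated intent; the fix compares explicitly against kNegativeDisplacement. A minimal standalone sketch of the hazard, with the enumerator order assumed to mirror V8's DisplacementMode:

#include <cassert>

// Assumed mirror of V8's DisplacementMode; if kNegativeDisplacement is
// the zero-valued enumerator, the old `mode ? negated : plain` form
// silently picks the wrong branch for every mode.
enum DisplacementMode { kNegativeDisplacement, kPositiveDisplacement };

int SelectImmediate(DisplacementMode mode, int negated, int plain) {
  // Fixed form: compare explicitly instead of relying on enum values.
  return mode == kNegativeDisplacement ? negated : plain;
}

int main() {
  assert(SelectImmediate(kNegativeDisplacement, -8, 8) == -8);
  assert(SelectImmediate(kPositiveDisplacement, -8, 8) == 8);
  return 0;
}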
@@ -289,6 +289,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
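Note on the hunk above: the new VisitSpeculationFence lowers the SpeculationFence machine operator to a single kLFence instruction with no outputs. On x64, LFENCE serializes instruction dispatch, so later loads cannot execute speculatively past it. A plain C++ equivalent of the barrier, assuming only the standard _mm_lfence intrinsic:

#include <immintrin.h>

// A speculation barrier in plain C++: LFENCE does not retire until all
// prior instructions complete, and no later instruction begins executing
// until it retires -- the property Spectre-style mitigations rely on.
inline void SpeculationBarrier() { _mm_lfence(); }

int main() {
  SpeculationBarrier();
  return 0;
}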
@@ -399,118 +404,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.TempImmediate(0), length_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- value_operand);
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.TempImmediate(0), length_operand, value_operand);
-}
-
-
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -579,7 +472,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -597,9 +491,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32And(Node* node) {
X64OperandGenerator g(this);
Uint32BinopMatcher m(node);
- if (m.right().Is(0xff)) {
+ if (m.right().Is(0xFF)) {
Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
- } else if (m.right().Is(0xffff)) {
+ } else if (m.right().Is(0xFFFF)) {
Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
} else {
VisitBinop(this, node, kX64And32);
@@ -823,6 +717,10 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
+ // In the case that the base address was zero, the displacement will be
+ // in a register and replacing it with an immediate is not allowed. This
+ // usually only happens in dead code anyway.
+ if (!inputs[input_count - 1].IsImmediate()) return false;
int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
inputs[input_count - 1] =
ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
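The guard added in this hunk bails out of the load-and-shift combine when the last input is not an immediate: with a zero base address the displacement ends up in a register, and a register operand cannot be rewritten as `displacement + 4`. A hypothetical mirror of the early-out, reduced to the operand check:

#include <cassert>

// The fold adds 4 to an existing displacement, which is only legal when
// that operand really is an immediate; otherwise the matcher must fail.
struct Operand {
  bool is_immediate;
  int value;
};

bool TryFoldDisplacement(Operand* op, int extra) {
  if (!op->is_immediate) return false;  // the new early-out
  op->value += extra;
  return true;
}

int main() {
  Operand in_register{false, 0}, immediate{true, 8};
  assert(!TryFoldDisplacement(&in_register, 4));
  assert(TryFoldDisplacement(&immediate, 4) && immediate.value == 12);
  return 0;
}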
@@ -1369,6 +1267,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
+#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1376,6 +1275,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
VisitRR(this, node, kArchTruncateDoubleToI);
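The added #undef RO_OP_LIST and #undef RR_OP_LIST lines are macro hygiene for the X-macro pattern used throughout this file: a list macro applies a visitor macro to each entry, and both are undefined once expansion is done so the names cannot leak into later code. A hypothetical miniature of the same pattern:

#include <iostream>

// List macro: applies a per-entry macro V to each operation name.
#define OP_LIST(V) \
  V(Add)           \
  V(Sub)

// Visitor macro: expands each entry into a function definition.
#define DECLARE_VISITOR(Name) \
  void Visit##Name() { std::cout << "visit " #Name "\n"; }
OP_LIST(DECLARE_VISITOR)
#undef DECLARE_VISITOR
#undef OP_LIST  // mirrors the added #undef RO_OP_LIST / #undef RR_OP_LIST

int main() {
  VisitAdd();
  VisitSub();
  return 0;
}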
@@ -1538,11 +1438,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ InstructionOperand value = g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1550,31 +1450,55 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
- Node* input_node = input.node();
- if (g.CanBeImmediate(input_node)) {
- Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+ // Skip any alignment holes in pushed nodes. We may have one in case of a
+ // Simd128 stack argument.
+ if (input.node == nullptr) continue;
+ if (g.CanBeImmediate(input.node)) {
+ Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
} else if (IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input_node))) {
+ sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
- Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
- } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+ Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
+ } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kX64Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+ Emit(kX64Push, g.NoOutput(), g.Use(input.node));
}
}
}
}
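The rewritten push loop above reflects PushParameter becoming a plain struct (input.node is now a field, not an accessor) and adds a skip for null entries, which mark the padding slot paired with a 16-byte Simd128 argument. A small sketch of the skip-and-push order, with the node type reduced to a label for illustration:

#include <iostream>
#include <vector>

// Hypothetical mirror of the loop shape: arguments are pushed in reverse
// (last argument first), and nullptr entries -- alignment holes left for
// Simd128 stack slots -- are skipped rather than pushed.
struct PushParameter {
  const char* node;  // nullptr marks an alignment hole
};

void PushAll(const std::vector<PushParameter>& arguments) {
  for (auto it = arguments.rbegin(); it != arguments.rend(); ++it) {
    if (it->node == nullptr) continue;
    std::cout << "push " << it->node << "\n";
  }
}

int main() {
  PushAll({{"arg0"}, {nullptr}, {"simd128_arg"}});
  return 0;
}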
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ X64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ InstructionOperand slot = g.UseImmediate(reverse_slot);
+ Emit(kX64Peek, 1, &result, 1, &slot);
+ }
+}
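EmitPrepareResults is new: for calls with multiple returns, any result assigned a caller frame slot is read back with kX64Peek at an increasing reverse-slot offset, counted in pointer-sized units, while null nodes (alignment holes) still advance the offset without emitting anything. A sketch of just the slot accounting, under those assumptions:

#include <iostream>
#include <vector>

// Hypothetical mirror of the reverse-slot accounting: every stack result
// advances the offset by its size in pointers; a peek is emitted only
// when there is a live node to receive the value.
struct ResultSlot {
  bool on_stack;
  int size_in_pointers;
  bool has_node;  // false models an alignment hole
};

std::vector<int> PeekOffsets(const std::vector<ResultSlot>& results) {
  std::vector<int> offsets;
  int reverse_slot = 0;
  for (const ResultSlot& r : results) {
    if (!r.on_stack) continue;
    reverse_slot += r.size_in_pointers;
    if (!r.has_node) continue;
    offsets.push_back(reverse_slot);
  }
  return offsets;
}

int main() {
  // One register result, then two one-pointer stack results.
  for (int off : PeekOffsets({{false, 1, true}, {true, 1, true}, {true, 1, true}}))
    std::cout << "peek at reverse slot " << off << "\n";
  return 0;
}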
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
@@ -1602,7 +1526,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1624,7 +1549,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1812,7 +1738,8 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
} else {
@@ -2012,14 +1939,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
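The recurring EmitDeoptimize changes in this file all serve these two visitors: DeoptimizeParameters now carries a feedback source, and the continuation forwards it so the generated deopt can attribute itself to a feedback vector slot. A much-reduced, hypothetical mirror of the plumbing (struct layout here is an assumption, only the names follow the diff):

#include <iostream>

// Reduced mirror of the change: the continuation gains a feedback field,
// and ForDeoptimize forwards it alongside the condition.
struct FeedbackSource { int slot; };

struct FlagsContinuation {
  int condition;
  FeedbackSource feedback;
  static FlagsContinuation ForDeoptimize(int condition, FeedbackSource fb) {
    return {condition, fb};
  }
};

int main() {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(/*kNotEqual*/ 1, {7});
  std::cout << "deopt attributed to feedback slot " << cont.feedback.slot << "\n";
  return 0;
}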
@@ -2442,16 +2369,21 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F32x4) \
V(I32x4) \
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2505,6 +2437,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Xor)
#define SIMD_UNOP_LIST(V) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
V(I32x4Neg) \
V(I16x8Neg) \
V(I8x16Neg) \
@@ -2580,6 +2514,10 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
+#undef SIMD_TYPES
+#undef SIMD_BINOP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_SHIFT_OPCODES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2601,7 +2539,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;