Diffstat (limited to 'deps/v8/src/compiler/ia32/instruction-selector-ia32.cc')
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc  | 230
1 file changed, 119 insertions(+), 111 deletions(-)
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index aa6e9fd607..f49935d8e0 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -74,7 +74,7 @@ class IA32OperandGenerator final : public OperandGenerator {
#if 0
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ Handle<HeapObject> value = HeapConstantOf(node->op());
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
#else
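
NOTE: Most of the churn in this file is mechanical: upstream V8 changed OpParameter to take the Operator* directly rather than a Node*, so every call site gains a ->op(). A minimal sketch of the signature change, simplified from the real templates in src/compiler/operator.h:

    // Before: the parameter was looked up through the node.
    template <typename T>
    T OpParameter(const Node* node);

    // After: it is read off the operator itself, hence node->op() below.
    template <typename T>
    T OpParameter(const Operator* op);
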
@@ -94,13 +94,13 @@ class IA32OperandGenerator final : public OperandGenerator {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == nullptr)
? 0
- : OpParameter<int32_t>(displacement_node);
+ : OpParameter<int32_t>(displacement_node->op());
if (displacement_mode == kNegativeDisplacement) {
displacement = -displacement;
}
if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
- displacement += OpParameter<int32_t>(base);
+ displacement += OpParameter<int32_t>(base->op());
base = nullptr;
}
}
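
NOTE: A worked example of the constant folding above, with hypothetical inputs:

    // If displacement_node = Int32Constant(8) and base = Int32Constant(0x1000),
    // the two steps combine to displacement = 0x1008 and base = nullptr, so
    // the selector can use an absolute-displacement addressing mode instead
    // of tying up a base register.
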
@@ -214,7 +214,8 @@ void VisitRRISimd(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ InstructionOperand operand1 =
+ g.UseImmediate(OpParameter<int32_t>(node->op()));
InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
@@ -288,7 +289,7 @@ void InstructionSelector::VisitLoad(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
code |= MiscField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
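
NOTE: The CHECK_EQ change tracks an upstream rename of the Spectre load-poisoning configuration, from LoadPoisoning::kDoPoison to PoisoningMitigationLevel::kOn. A hedged sketch of the renamed enum; kOn comes from this diff, the other enumerator name is an assumption:

    enum class PoisoningMitigationLevel {
      kOff,  // assumed counterpart; only kOn appears in this diff
      kOn,
    };
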
@@ -421,7 +422,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
Node* right = m.right().node();
InstructionOperand inputs[6];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -462,29 +463,15 @@ void VisitBinop(InstructionSelector* selector, Node* node,
}
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
outputs[output_count++] = g.DefineSameAsFirst(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsByteRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK_EQ(1u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
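
NOTE: The deleted branch/deoptimize/set/trap plumbing has not vanished; this series moves it into a shared EmitWithContinuation helper on the base InstructionSelector, so each architecture backend stops repeating the dispatch. A rough sketch of what the helper centralizes, reconstructed from the code removed here (not the literal upstream implementation):

    void InstructionSelector::EmitWithContinuation(
        InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
        size_t input_count, InstructionOperand* inputs,
        FlagsContinuation* cont) {
      opcode = cont->Encode(opcode);
      if (cont->IsBranch()) {
        // Append labels for cont->true_block() / cont->false_block() as inputs.
      } else if (cont->IsSet()) {
        // Append a byte-register output for cont->result().
      } else if (cont->IsTrap()) {
        // Append cont->trap_id() as an immediate input.
      }
      // Deoptimize continuations route through EmitDeoptimize with the
      // continuation's kind/reason/feedback/frame state; everything else is
      // emitted with the extended operand lists.
    }
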
@@ -1080,51 +1067,20 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
DCHECK_EQ(IrOpcode::kLoad, left->opcode());
IA32OperandGenerator g(selector);
size_t input_count = 0;
- InstructionOperand inputs[6];
+ InstructionOperand inputs[4];
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
opcode |= AddressingModeField::encode(addressing_mode);
- opcode = cont->Encode(opcode);
inputs[input_count++] = right;
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- InstructionOperand output = g.DefineAsRegister(cont->result());
- selector->Emit(opcode, 1, &output, input_count, inputs);
- } else {
- DCHECK(cont->IsTrap());
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
}
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- IA32OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1145,8 +1101,8 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
if (node->opcode() == IrOpcode::kInt32Constant ||
node->opcode() == IrOpcode::kInt64Constant) {
int64_t constant = node->opcode() == IrOpcode::kInt32Constant
- ? OpParameter<int32_t>(node)
- : OpParameter<int64_t>(node);
+ ? OpParameter<int32_t>(node->op())
+ : OpParameter<int64_t>(node->op());
if (hint == MachineType::Int8()) {
if (constant >= std::numeric_limits<int8_t>::min() &&
constant <= std::numeric_limits<int8_t>::max()) {
@@ -1299,8 +1255,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kIA32StackCheck);
CHECK(cont->IsBranch());
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ selector->EmitWithContinuation(opcode, cont);
return;
}
}
@@ -1406,20 +1361,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
+ value_operand, g.TempImmediate(-sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
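
NOTE: The jump-table heuristic above can be read in isolation. A standalone restatement, with the constants copied from this hunk (the function name is ours, for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    bool PreferTableSwitch(size_t case_count, size_t value_range,
                           int64_t min_value) {
      static const size_t kMaxTableSwitchValueRange = 2 << 16;
      size_t table_space_cost = 4 + value_range;
      size_t table_time_cost = 3;
      size_t lookup_space_cost = 3 + 2 * case_count;
      size_t lookup_time_cost = case_count;
      return case_count > 4 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min() &&
             value_range <= kMaxTableSwitchValueRange;
    }

For example, 10 dense cases (value_range == 10) give a table cost of 14 + 9 = 23 against a lookup cost of 23 + 30 = 53, so the jump table wins.
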
@@ -1779,6 +1734,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_BINOP_LIST(V) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -1788,6 +1744,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4Lt) \
V(F32x4Le) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -1802,6 +1759,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4GeU) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -1837,14 +1795,17 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
-#define SIMD_INT_UNOP_LIST(V) \
- V(I32x4Neg) \
- V(I16x8Neg) \
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
V(I8x16Neg)
-#define SIMD_OTHER_UNOP_LIST(V) \
- V(F32x4Abs) \
- V(F32x4Neg) \
+#define SIMD_UNOP_PREFIX_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
@@ -1868,7 +1829,8 @@ void InstructionSelector::VisitF32x4Splat(Node* node) {
void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ InstructionOperand operand1 =
+ g.UseImmediate(OpParameter<int32_t>(node->op()));
if (IsSupported(AVX)) {
Emit(kAVXF32x4ExtractLane, g.DefineAsRegister(node), operand0, operand1);
} else {
@@ -1876,6 +1838,16 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
}
}
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4UConvertI32x4, g.DefineAsRegister(node), operand0);
+ } else {
+ Emit(kSSEF32x4UConvertI32x4, g.DefineSameAsFirst(node), operand0);
+ }
+}
+
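
NOTE: The operand constraints in the new visitor follow the AVX/SSE split used throughout this file: VEX-encoded AVX instructions take a separate destination, so the result can live in any register (DefineAsRegister), while legacy SSE encodings overwrite their first source, so the result must be allocated to that same register (DefineSameAsFirst).

    // Encoding shapes, in miniature (unrelated instructions, purely to show
    // the two-operand vs three-operand difference):
    //   SSE: addps  xmm1, xmm2       ; xmm1 = xmm1 + xmm2 (dst == src0)
    //   AVX: vaddps xmm1, xmm2, xmm3 ; xmm1 = xmm2 + xmm3 (dst separate)
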
#define SIMD_I8X16_SHIFT_OPCODES(V) \
V(I8x16Shl) \
V(I8x16ShrS) \
@@ -1933,65 +1905,67 @@ SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#define VISIT_SIMD_EXTRACT_LANE(Type) \
void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
IA32OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE
-#define VISIT_SIMD_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
- InstructionOperand operand2 = g.Use(node->InputAt(1)); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
- operand1, operand2); \
- } else { \
- Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
- operand1, operand2); \
- } \
+#define VISIT_SIMD_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = \
+ g.UseImmediate(OpParameter<int32_t>(node->op())); \
+ InstructionOperand operand2 = g.Use(node->InputAt(1)); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
+ operand1, operand2); \
+ } else { \
+ Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
+ operand1, operand2); \
+ } \
}
SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_INT_TYPES
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Opcode, g.DefineAsRegister(node), operand0, operand1); \
- } else { \
- Emit(kSSE##Opcode, g.DefineSameAsFirst(node), operand0, operand1); \
- } \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = \
+ g.UseImmediate(OpParameter<int32_t>(node->op())); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Opcode, g.DefineAsRegister(node), operand0, operand1); \
+ } else { \
+ Emit(kSSE##Opcode, g.DefineSameAsFirst(node), operand0, operand1); \
+ } \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
-#define VISIT_SIMD_INT_UNOP(Opcode) \
+#define VISIT_SIMD_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
-#undef VISIT_SIMD_INT_UNOP
-#undef SIMD_INT_UNOP_LIST
+SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
+#undef VISIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
-#define VISIT_SIMD_OTHER_UNOP(Opcode) \
+#define VISIT_SIMD_UNOP_PREFIX(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
-#undef VISIT_SIMD_OTHER_UNOP
-#undef SIMD_OTHER_UNOP_LIST
+SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
+#undef VISIT_SIMD_UNOP_PREFIX
+#undef SIMD_UNOP_PREFIX_LIST
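
NOTE: For readers new to the X-macro pattern used in these lists: applying VISIT_SIMD_UNOP_PREFIX to the F32x4Abs entry of SIMD_UNOP_PREFIX_LIST expands to a visitor equivalent to the following (expanded by hand from the macro above):

    void InstructionSelector::VisitF32x4Abs(Node* node) {
      IA32OperandGenerator g(this);
      InstructionCode opcode = IsSupported(AVX) ? kAVXF32x4Abs : kSSEF32x4Abs;
      Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
    }
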
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2009,6 +1983,43 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ static const int kMaxSwizzleIndex = 15;
+ static const int kMaxShuffleIndex = 31;
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
+ uint8_t mask = CanonicalizeShuffle(node);
+ uint8_t shuffle32x4[4];
+ IA32OperandGenerator g(this);
+ InstructionOperand output = g.DefineAsRegister(node);
+ InstructionOperand inputs[6];
+ InstructionOperand temps[1];
+ size_t input_count = 0;
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ if (mask == kMaxSwizzleIndex) {
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kIA32S32x4Swizzle, output, g.Use(input0),
+ g.UseImmediate((shuffle32x4[0] & 3) | ((shuffle32x4[1] & 3) << 2) |
+ ((shuffle32x4[2] & 3) << 4) |
+ ((shuffle32x4[3] & 3) << 6)));
+ return;
+ }
+ // TODO(ia32): handle non 32x4 swizzles here
+ inputs[input_count++] = g.Use(input0);
+ } else {
+ DCHECK_EQ(kMaxShuffleIndex, mask);
+ USE(kMaxShuffleIndex);
+ inputs[input_count++] = g.Use(input0);
+ inputs[input_count++] = g.Use(input1);
+ }
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle, mask));
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle + 4, mask));
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle + 8, mask));
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle + 12, mask));
+ temps[0] = g.TempRegister();
+ Emit(kIA32S8x16Shuffle, 1, &output, input_count, inputs, 1, temps);
+}
+
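
NOTE: The immediate built for kIA32S32x4Swizzle packs four 2-bit lane selectors into one byte, the same layout x86's pshufd immediate uses. A standalone illustration (the helper name is ours):

    #include <cstdint>

    uint8_t PackShuffle32x4(const uint8_t lanes[4]) {
      return static_cast<uint8_t>((lanes[0] & 3) | ((lanes[1] & 3) << 2) |
                                  ((lanes[2] & 3) << 4) |
                                  ((lanes[3] & 3) << 6));
    }

    // Example: the identity swizzle {0, 1, 2, 3} packs to 0xE4 (0b11100100).
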
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2039,9 +2050,6 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
-
} // namespace compiler
} // namespace internal
} // namespace v8