summaryrefslogtreecommitdiff
path: root/deps/v8/src/compiler/simd-scalar-lowering.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/compiler/simd-scalar-lowering.cc')
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc130
1 files changed, 100 insertions, 30 deletions
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 423d757a4f..980c88a6e6 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -85,6 +85,7 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -116,6 +117,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -138,6 +140,7 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8ShrS) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -354,7 +357,6 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
int num_lanes = NumLanes(rep_type);
Node** indices = zone()->NewArray<Node*>(num_lanes);
GetIndexNodes(index, indices, rep_type);
- DCHECK_LT(2, node->InputCount());
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
@@ -387,14 +389,30 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
}
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op) {
+ const Operator* op,
+ bool not_horizontal) {
DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ if (not_horizontal) {
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ } else {
+ for (int i = 0; i < num_lanes / 2; ++i) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[i] =
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
+ rep_node[i + num_lanes / 2] =
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
+#else
+ rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
+ rep_node[i + num_lanes / 2] =
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
+#endif
+ }
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -436,7 +454,8 @@ Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
SimdType input_rep_type,
- const Operator* op) {
+ const Operator* op,
+ bool not_horizontal) {
DCHECK_EQ(2, node->InputCount());
DCHECK(input_rep_type == SimdType::kInt16x8 ||
input_rep_type == SimdType::kInt8x16);
@@ -446,9 +465,29 @@ void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
int32_t shift_val =
(input_rep_type == SimdType::kInt16x8) ? kShift16 : kShift8;
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = FixUpperBits(graph()->NewNode(op, rep_left[i], rep_right[i]),
- shift_val);
+ if (not_horizontal) {
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i], rep_right[i]), shift_val);
+ }
+ } else {
+ for (int i = 0; i < num_lanes / 2; ++i) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
+ shift_val);
+ rep_node[i + num_lanes / 2] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
+ shift_val);
+#else
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
+ shift_val);
+ rep_node[i + num_lanes / 2] = FixUpperBits(
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
+ shift_val);
+#endif
+ }
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -578,9 +617,9 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
args[3] = graph()->start();
Signature<MachineType>::Builder sig_builder(zone(), 0, 1);
sig_builder.AddParam(MachineType::Pointer());
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), sig_builder.Build());
- Node* call = graph()->NewNode(common()->Call(desc), 4, args);
+ Node* call = graph()->NewNode(common()->Call(call_descriptor), 4, args);
return graph()->NewNode(machine()->Load(LoadRepresentation::Float64()),
stack_slot, jsgraph_->Int32Constant(0), call,
graph()->start());
@@ -636,7 +675,8 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
break;
case IrOpcode::kI16x8ShrU:
- rep_node[i] = Mask(rep_node[i], kMask16); // Fall through.
+ rep_node[i] = Mask(rep_node[i], kMask16);
+ V8_FALLTHROUGH;
case IrOpcode::kI32x4ShrU:
rep_node[i] =
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
@@ -751,22 +791,35 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kStore: {
+ // For store operation, use replacement type of its input instead of the
+ // one of its affected node.
+ DCHECK_LT(2, node->InputCount());
+ SimdType input_rep_type = ReplacementType(node->InputAt(2));
+ if (input_rep_type != rep_type)
+ replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep =
StoreRepresentationOf(node->op()).representation();
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
- store_op = machine()->Store(StoreRepresentation(
- MachineTypeFrom(rep_type).representation(), write_barrier_kind));
- LowerStoreOp(rep, node, store_op, rep_type);
+ store_op = machine()->Store(
+ StoreRepresentation(MachineTypeFrom(input_rep_type).representation(),
+ write_barrier_kind));
+ LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
+ // For store operation, use replacement type of its input instead of the
+ // one of its affected node.
+ DCHECK_LT(2, node->InputCount());
+ SimdType input_rep_type = ReplacementType(node->InputAt(2));
+ if (input_rep_type != rep_type)
+ replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
- store_op =
- machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
- LowerStoreOp(rep, node, store_op, rep_type);
+ store_op = machine()->UnalignedStore(
+ MachineTypeFrom(input_rep_type).representation());
+ LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kReturn: {
@@ -779,18 +832,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kCall: {
// TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
- (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Simd128())) {
+ (call_descriptor->ReturnCount() == 1 &&
+ call_descriptor->GetReturnType(0) == MachineType::Simd128())) {
// We have to adjust the call descriptor.
- const Operator* op =
- common()->Call(GetI32WasmCallDescriptorForSimd(zone(), descriptor));
+ const Operator* op = common()->Call(
+ GetI32WasmCallDescriptorForSimd(zone(), call_descriptor));
NodeProperties::ChangeOp(node, op);
}
- if (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Simd128()) {
+ if (call_descriptor->ReturnCount() == 1 &&
+ call_descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
Node* rep_node[kNumLanes32];
for (int i = 0; i < kNumLanes32; ++i) {
@@ -831,6 +884,14 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
+ case IrOpcode::kI32x4AddHoriz: {
+ LowerBinaryOp(node, rep_type, machine()->Int32Add(), false);
+ break;
+ }
+ case IrOpcode::kI16x8AddHoriz: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add(), false);
+ break;
+ }
case IrOpcode::kI16x8Add:
case IrOpcode::kI8x16Add: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
@@ -940,6 +1001,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerShiftOp(node, rep_type);
break;
}
+ case IrOpcode::kF32x4AddHoriz: {
+ LowerBinaryOp(node, rep_type, machine()->Float32Add(), false);
+ break;
+ }
#define F32X4_BINOP_CASE(name) \
case IrOpcode::kF32x4##name: { \
LowerBinaryOp(node, rep_type, machine()->Float32##name()); \
@@ -1002,7 +1067,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
- Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** old_rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = old_rep_node[i];
+ }
if (HasReplacement(0, repNode)) {
rep_node[lane] = GetReplacements(repNode)[0];
} else {
@@ -1075,11 +1144,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
- jsgraph_->Int32Constant(0)));
- rep_node[i] = d.Phi(MachineTypeFrom(rep_type).representation(),
- rep_right[1], rep_left[0]);
+ Node* tmp1 =
+ graph()->NewNode(machine()->Word32Xor(), rep_left[i], rep_right[i]);
+ Node* tmp2 =
+ graph()->NewNode(machine()->Word32And(), boolean_input[i], tmp1);
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Xor(), rep_right[i], tmp2);
}
ReplaceNode(node, rep_node, num_lanes);
break;