Diffstat (limited to 'deps/v8/src/compiler/arm64/instruction-selector-arm64.cc')
 deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 327 ++++++------
 1 file changed, 172 insertions(+), 155 deletions(-)
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index d6082c9f0a..0787ccdc0f 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -629,9 +629,16 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
EmitLoad(this, node, opcode, immediate_mode, rep);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
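// A minimal sketch (not part of this diff) of what kMemoryAccessPoisoned
// implies downstream, assuming the code generator masks load results with the
// speculation poison register so that mis-speculated loads observe zero:
//
//   ldr w0, [x1, x2]   // the load selected above
//   and w0, w0, w9     // w9 standing in for the poison register (assumed)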
@@ -1238,6 +1245,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RRR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1245,6 +1253,7 @@ RR_OP_LIST(RR_VISITOR)
}
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
+#undef RRR_OP_LIST
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
@@ -1258,7 +1267,10 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64DsbIsb, g.NoOutput());
+}
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
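// As the opcode name suggests, kArm64DsbIsb is expected to expand to the
// arm64 speculation barrier pair:
//
//   dsb sy   // data synchronization barrier, full system
//   isb      // flush the pipeline so later instructions are refetched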
@@ -1642,6 +1654,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
default:
break;
}
+ break;
}
default:
break;
@@ -1680,7 +1693,7 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
Arm64OperandGenerator g(this);
@@ -1692,7 +1705,7 @@ void InstructionSelector::EmitPrepareArguments(
// Bump the stack pointer(s).
if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
- // TODO(titzer): it would be better to bump the csp here only
+ // TODO(titzer): it would be better to bump the sp here only
// and emit paired stores with increment for non c frames.
Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
}
@@ -1720,9 +1733,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
Arm64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1731,7 +1744,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
@@ -1939,6 +1952,9 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
// against {value}, depending on the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
+ // Branch poisoning requires flags to be set, so when it's enabled for
+ // a particular branch, we shouldn't be applying the cbz/tbz optimization.
+ DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
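// Why poisoning rules out this optimization: updating the poison register
// needs the NZCV flags that a compare leaves behind, while cbz/tbz branch
// without setting any flags. Roughly:
//
//   cmp w0, #0       // sets flags; the poison update can key off them
//   b.eq target
//
// versus the flag-free form this helper emits:
//
//   cbz w0, target   // branches directly, NZCV untouched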
@@ -1991,7 +2007,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
return true;
}
}
- } // Fall through.
+ V8_FALLTHROUGH;
+ }
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan: {
if (value != 0) return false;
@@ -2010,16 +2027,18 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
- cond, cont)) {
- return;
- }
- } else if (m.left().HasValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
- commuted_cond, cont)) {
- return;
+ if (!cont->IsPoisoned()) {
+ if (m.right().HasValue()) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
+ cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
+ commuted_cond, cont)) {
+ return;
+ }
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2092,7 +2111,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- if (cont->IsBranch() && m.right().HasValue() &&
+ if (cont->IsBranch() && !cont->IsPoisoned() && m.right().HasValue() &&
base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
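// Example of the single-bit case: for Word32And(x, 0x8) == 0 the mask is a
// power of two (bit 3), so instead of tst + b.eq the selector can emit:
//
//   tbz w0, #3, target   // branch if bit 3 of w0 is zero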
@@ -2142,12 +2161,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
}
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
+} // namespace
+
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(this);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -2156,84 +2176,83 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
Node* const left = m.left().node();
- if (selector->CanCover(value, left) &&
- left->opcode() == IrOpcode::kWord64And) {
+ if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
// Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
// into a tbz/tbnz instruction.
if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- selector, left, cont)) {
+ this, left, cont)) {
return;
}
- return VisitWordCompare(selector, left, kArm64Tst, cont, true,
+ return VisitWordCompare(this, left, kArm64Tst, cont, true,
kLogical64Imm);
}
// Merge the Word64Equal(x, 0) comparison into a cbz instruction.
- if (cont->IsBranch() || cont->IsDeoptimize()) {
- EmitBranchOrDeoptimize(selector,
- cont->Encode(kArm64CompareAndBranch),
+ if ((cont->IsBranch() || cont->IsDeoptimize()) &&
+ !cont->IsPoisoned()) {
+ EmitBranchOrDeoptimize(this, cont->Encode(kArm64CompareAndBranch),
g.UseRegister(left), cont);
return;
}
}
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -2245,30 +2264,30 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kArm64Add32, kArithmeticImm, cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+ kArithmeticImm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kArm64Sub32, kArithmeticImm, cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+ kArithmeticImm, cont);
case IrOpcode::kInt32MulWithOverflow:
// ARM64 doesn't set the overflow flag for multiplication, so we
// need to test on kNotEqual. Here is the code sequence used:
// smull result, left, right
// cmp result.X(), Operand(result, SXTW)
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
kArithmeticImm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
kArithmeticImm, cont);
default:
break;
@@ -2277,23 +2296,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
+ return VisitWordCompare(this, value, kArm64Cmn32, cont, true,
kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
- selector, value, cont)) {
+ this, value, cont)) {
return;
}
- return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
+ return VisitWordCompare(this, value, kArm64Tst32, cont, true,
kLogical32Imm);
case IrOpcode::kWord64And:
if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- selector, value, cont)) {
+ this, value, cont)) {
return;
}
- return VisitWordCompare(selector, value, kArm64Tst, cont, true,
+ return VisitWordCompare(this, value, kArm64Tst, cont, true,
kLogical64Imm);
default:
break;
@@ -2302,80 +2321,52 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Branch could not be combined with a compare, compare against 0 and branch.
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
- g.UseRegister(value), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ if (cont->IsPoisoned()) {
+ // We need an instruction that sets flags for poisoning to work.
+ Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ }
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value),
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
+ EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
+ g.UseRegister(value), g.UseRegister(value), cont->kind(),
+ cont->reason(), cont->feedback(), cont->frame_state());
} else {
DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value),
- g.UseImmediate(cont->trap_id()));
+ Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
+ g.UseRegister(value), g.UseImmediate(cont->trap_id()));
}
}
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kArm64Sub32, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArm64Sub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
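// Worked example of the (unchanged) heuristic above, now gated on
// kEnableSwitchJumpTable: with case_count = 4 and value_range = 10,
//   table:  (4 + 10) + 3 * 3     = 23
//   lookup: (3 + 2 * 4) + 3 * 4  = 23
// so the jump table is chosen (23 <= 23); widening value_range to 11 tips
// the decision to the chain of conditional jumps below.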
@@ -2622,7 +2613,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2630,13 +2621,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2650,7 +2643,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2659,13 +2652,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2683,7 +2676,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2691,15 +2684,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2718,7 +2711,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2727,15 +2720,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2793,11 +2786,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
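// For reference, VISIT_ATOMIC_BINOP(Add) now expands to:
//
//   void InstructionSelector::VisitWord32AtomicAdd(Node* node) {
//     VisitAtomicBinaryOperation(
//         node, kWord32AtomicAddInt8, kWord32AtomicAddUint8,
//         kWord32AtomicAddInt16, kWord32AtomicAddUint16,
//         kWord32AtomicAddWord32);
//   }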
@@ -2820,11 +2814,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4, 4) \
- V(16x8, 8) \
- V(8x16, 16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
@@ -2957,6 +2946,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
}
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
+#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2964,6 +2954,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
}
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2971,6 +2962,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
+#undef SIMD_SHIFT_OP_LIST
#define SIMD_VISIT_BINOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2978,6 +2970,7 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
}
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
@@ -3132,6 +3125,26 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ VisitRR(this, kArm64Sxtb32, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ VisitRR(this, kArm64Sxth32, node);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxtb, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxth, node);
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxtw, node);
+}
+
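// The five visitors map one-to-one onto the arm64 extend instructions; the
// expected lowering, going by the opcode names, is:
//
//   sxtb w0, w1   // SignExtendWord8ToInt32
//   sxth w0, w1   // SignExtendWord16ToInt32
//   sxtb x0, w1   // SignExtendWord8ToInt64
//   sxth x0, w1   // SignExtendWord16ToInt64
//   sxtw x0, w1   // SignExtendWord32ToInt64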
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -3148,7 +3161,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ReverseBits |
- MachineOperatorBuilder::kWord64ReverseBits;
+ MachineOperatorBuilder::kWord64ReverseBits |
+ MachineOperatorBuilder::kSpeculationFence;
}
// static
@@ -3158,6 +3172,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8