// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/gap-resolver.h"

#include <algorithm>
#include <functional>
#include <set>

namespace v8 {
namespace internal {
namespace compiler {

namespace {

#define REP_BIT(rep) (1 << static_cast<int>(rep))

const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);

inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
  return !move->IsEliminated() && move->source().InterferesWith(destination);
}

// Splits an FP move between two location operands into the equivalent series
// of moves between smaller sub-operands, e.g. a double move to two single
// moves.
// This helps reduce the number of cycles that would normally occur under FP
// aliasing, and makes swaps much easier to implement.
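// For illustration, assuming ARM-style register aliasing where the double
// register d<n> overlaps the single registers s<2n> and s<2n+1>: splitting the
// kFloat64 move d1 -> d2 with smaller_rep == kFloat32 produces the kFloat32
// moves s2 -> s4 and s3 -> s5.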
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
                    ParallelMove* moves) {
  DCHECK(!kSimpleFPAliasing);
  // Splitting is only possible when the slot size is the same as float size.
  DCHECK_EQ(kPointerSize, kFloatSize);
  const LocationOperand& src_loc = LocationOperand::cast(move->source());
  const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
  MachineRepresentation dst_rep = dst_loc.representation();
  DCHECK_NE(smaller_rep, dst_rep);
  auto src_kind = src_loc.location_kind();
  auto dst_kind = dst_loc.location_kind();

  int aliases =
      1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
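  // For example, splitting a kFloat64 (8 bytes) into kFloat32 (4 bytes)
  // fragments gives aliases == 2.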
  int base = -1;
  USE(base);
  DCHECK_EQ(aliases, RegisterConfiguration::Default()->GetAliases(
                         dst_rep, 0, smaller_rep, &base));

  int src_index = -1;
  int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kPointerSize;
  int src_step = 1;
  if (src_kind == LocationOperand::REGISTER) {
    src_index = src_loc.register_code() * aliases;
  } else {
    src_index = src_loc.index();
    // For operands that occupy multiple slots, the index refers to the last
    // slot. On little-endian architectures, we start at the high slot and use a
    // negative step so that register-to-slot moves are in the correct order.
    src_step = -slot_size;
  }
  int dst_index = -1;
  int dst_step = 1;
  if (dst_kind == LocationOperand::REGISTER) {
    dst_index = dst_loc.register_code() * aliases;
  } else {
    dst_index = dst_loc.index();
    dst_step = -slot_size;
  }

  // Reuse 'move' for the first fragment. It is not pending.
  move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
  move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
  // Add the remaining fragment moves.
  for (int i = 1; i < aliases; ++i) {
    src_index += src_step;
    dst_index += dst_step;
    moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
                   AllocatedOperand(dst_kind, smaller_rep, dst_index));
  }
  // Return the first fragment.
  return move;
}

}  // namespace

void GapResolver::Resolve(ParallelMove* moves) {
  // Clear redundant moves, and collect FP move representations if aliasing
  // is non-simple.
  int reps = 0;
  for (size_t i = 0; i < moves->size();) {
    MoveOperands* move = (*moves)[i];
    if (move->IsRedundant()) {
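      // Remove by overwriting with the last move and shrinking the vector; the
      // element swapped into slot i is examined on the next iteration since i
      // is not incremented.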
      (*moves)[i] = moves->back();
      moves->pop_back();
      continue;
    }
    i++;
    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
      reps |=
          REP_BIT(LocationOperand::cast(move->destination()).representation());
    }
  }

  if (!kSimpleFPAliasing) {
    if (reps && !base::bits::IsPowerOfTwo(reps)) {
      // Start with the smallest FP moves, so we never encounter smaller moves
      // in the middle of a cycle of larger moves.
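      // (PerformMove only splits interfering moves whose representation is
      // larger than split_rep_; it cannot merge smaller ones, so the smaller
      // representation must be fully resolved before moving on to the larger
      // one.)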
      if ((reps & kFloat32Bit) != 0) {
        split_rep_ = MachineRepresentation::kFloat32;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsFloatRegister())
            PerformMove(moves, move);
        }
      }
      if ((reps & kFloat64Bit) != 0) {
        split_rep_ = MachineRepresentation::kFloat64;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsDoubleRegister())
            PerformMove(moves, move);
        }
      }
    }
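    // kSimd128 is the largest FP representation, so the general pass below
    // never triggers further splitting.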
    split_rep_ = MachineRepresentation::kSimd128;
  }

  for (size_t i = 0; i < moves->size(); ++i) {
    auto move = (*moves)[i];
    if (!move->IsEliminated()) PerformMove(moves, move);
  }
}

void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We mark a
  // move as "pending" on entry to PerformMove in order to detect cycles in the
  // move graph.  We use operand swaps to resolve cycles, which means that a
  // call to PerformMove could change any source operand in the move graph.
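  // For illustration, consider the register-only cycle {A -> B, B -> A}:
  // performing A -> B first marks it pending, the recursive call for B -> A
  // then finds the pending A -> B as its blocker and breaks the cycle with
  // AssembleSwap; the source update below rewrites A -> B as B -> B, which is
  // eliminated as the last move of the cycle once the recursion unwinds.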
  DCHECK(!move->IsPending());
  DCHECK(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  InstructionOperand source = move->source();
  DCHECK(!source.IsInvalid());  // Or else it will look eliminated.
  InstructionOperand destination = move->destination();
  move->SetPending();

  // We may need to split moves between FP locations differently.
  const bool is_fp_loc_move =
      !kSimpleFPAliasing && destination.IsFPLocationOperand();

  // Perform a depth-first traversal of the move graph to resolve dependencies.
  // Any unperformed, non-pending move whose source interferes with this one's
  // destination blocks this one, so we recursively perform all such moves.
  for (size_t i = 0; i < moves->size(); ++i) {
    auto other = (*moves)[i];
    if (other->IsEliminated()) continue;
    if (other->IsPending()) continue;
    if (other->source().InterferesWith(destination)) {
      if (is_fp_loc_move &&
          LocationOperand::cast(other->source()).representation() >
              split_rep_) {
        // 'other' must also be an FP location move. Break it into fragments
        // of the same size as 'move'. 'other' is set to one of the fragments,
        // and the rest are appended to 'moves'.
        other = Split(other, split_rep_, moves);
        // 'other' may no longer block the destination after being split.
        if (!other->source().InterferesWith(destination)) continue;
      }
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does not
      // miss any).  Assume there is a non-blocking move with source A and this
      // move is blocked on source B and there is a swap of A and B.  Then A and
      // B must be involved in the same cycle (or they would not be swapped).
      // Since this move's destination is B and there is only a single incoming
      // edge to an operand, this move must also be involved in the same cycle.
      // In that case, the blocking move will be created but will be "pending"
      // when we return from PerformMove.
      PerformMove(moves, other);
    }
  }

  // This move's source may have changed due to swaps to resolve cycles and so
  // it may now be the last move in the cycle.  If so, remove it.
  source = move->source();
  if (source.EqualsCanonicalized(destination)) {
    move->Eliminate();
    return;
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // The move may be blocked on at most one pending move, in which case we have
  // a cycle.  Search for such a blocking move and perform a swap to resolve
  // it.
  auto blocker =
      std::find_if(moves->begin(), moves->end(), [&](MoveOperands* other) {
        return Blocks(other, destination);
      });
  if (blocker == moves->end()) {
    // The easy case: This move is not blocked.
    assembler_->AssembleMove(&source, &destination);
    move->Eliminate();
    return;
  }

  // Ensure source is a register or both are stack slots, to limit swap cases.
  if (source.IsStackSlot() || source.IsFPStackSlot()) {
    std::swap(source, destination);
  }
  assembler_->AssembleSwap(&source, &destination);
  move->Eliminate();

  // Update outstanding moves whose source may now have been moved.
  if (is_fp_loc_move) {
    // We may have to split larger moves.
    for (size_t i = 0; i < moves->size(); ++i) {
      auto other = (*moves)[i];
      if (other->IsEliminated()) continue;
      if (source.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!source.InterferesWith(other->source())) continue;
        }
        other->set_source(destination);
      } else if (destination.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!destination.InterferesWith(other->source())) continue;
        }
        other->set_source(source);
      }
    }
  } else {
    for (auto other : *moves) {
      if (other->IsEliminated()) continue;
      if (source.EqualsCanonicalized(other->source())) {
        other->set_source(destination);
      } else if (destination.EqualsCanonicalized(other->source())) {
        other->set_source(source);
      }
    }
  }
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8