Diffstat (limited to 'deps/v8/src/compiler/greedy-allocator.h')
-rw-r--r--  deps/v8/src/compiler/greedy-allocator.h  80
1 file changed, 73 insertions, 7 deletions
diff --git a/deps/v8/src/compiler/greedy-allocator.h b/deps/v8/src/compiler/greedy-allocator.h
index c4e330eb97..45bbd87da8 100644
--- a/deps/v8/src/compiler/greedy-allocator.h
+++ b/deps/v8/src/compiler/greedy-allocator.h
@@ -18,21 +18,45 @@ namespace compiler {
// we may extend this to groups of LiveRanges. It has to be comparable.
class AllocationCandidate {
public:
- explicit AllocationCandidate(LiveRange* range) : range_(range) {}
+ explicit AllocationCandidate(LiveRange* range)
+ : is_group_(false), size_(range->GetSize()) {
+ candidate_.range_ = range;
+ }
+
+ explicit AllocationCandidate(LiveRangeGroup* ranges)
+ : is_group_(true), size_(CalculateGroupSize(ranges)) {
+ candidate_.group_ = ranges;
+ }
// Strict ordering operators
bool operator<(const AllocationCandidate& other) const {
- return range_->GetSize() < other.range_->GetSize();
+ return size() < other.size();
}
bool operator>(const AllocationCandidate& other) const {
- return range_->GetSize() > other.range_->GetSize();
+ return size() > other.size();
}
- LiveRange* live_range() const { return range_; }
+ bool is_group() const { return is_group_; }
+ LiveRange* live_range() const { return candidate_.range_; }
+ LiveRangeGroup* group() const { return candidate_.group_; }
private:
- LiveRange* range_;
+ unsigned CalculateGroupSize(LiveRangeGroup* group) {
+ unsigned ret = 0;
+ for (LiveRange* range : group->ranges()) {
+ ret += range->GetSize();
+ }
+ return ret;
+ }
+
+ unsigned size() const { return size_; }
+ bool is_group_;
+ unsigned size_;
+ union {
+ LiveRange* range_;
+ LiveRangeGroup* group_;
+ } candidate_;
};
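
Aside: AllocationCandidate now carries either a single LiveRange or a whole LiveRangeGroup in a tagged union, with the combined size cached at construction so the ordering operators stay cheap. Below is a minimal, self-contained sketch of the same tagged-union-plus-cached-size pattern; Range, Group and Candidate are simplified stand-ins, not V8 types.

// Sketch only: Range and Group stand in for V8's LiveRange / LiveRangeGroup;
// "size" is a plain unsigned rather than LiveRange::GetSize().
#include <cassert>
#include <vector>

struct Range { unsigned size; };
struct Group { std::vector<Range*> ranges; };

class Candidate {
 public:
  explicit Candidate(Range* r) : is_group_(false), size_(r->size) {
    candidate_.range = r;
  }
  explicit Candidate(Group* g) : is_group_(true), size_(TotalSize(g)) {
    candidate_.group = g;
  }
  // Strict ordering by the cached total size, mirroring AllocationCandidate.
  bool operator<(const Candidate& other) const { return size_ < other.size_; }
  bool operator>(const Candidate& other) const { return size_ > other.size_; }
  bool is_group() const { return is_group_; }
  Range* range() const { assert(!is_group_); return candidate_.range; }
  Group* group() const { assert(is_group_); return candidate_.group; }

 private:
  static unsigned TotalSize(Group* g) {
    unsigned total = 0;
    for (Range* r : g->ranges) total += r->size;
    return total;
  }
  bool is_group_;
  unsigned size_;  // Cached so comparisons never re-walk a group.
  union {
    Range* range;
    Group* group;
  } candidate_;
};
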
@@ -41,6 +65,7 @@ class AllocationScheduler final : ZoneObject {
public:
explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
void Schedule(LiveRange* range);
+ void Schedule(LiveRangeGroup* group);
AllocationCandidate GetNext();
bool empty() const { return queue_.empty(); }
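
The scheduler is then a priority queue of such candidates. The header does not show which end GetNext pops from, so the largest-first ordering below is an assumption; SketchCandidate and SketchScheduler are illustrative names, not V8 code.

// Sketch only: models a size-ordered scheduler with std::priority_queue,
// assuming (not confirmed by this header) that larger candidates come first.
#include <cstdio>
#include <queue>
#include <string>
#include <vector>

struct SketchCandidate {
  std::string name;
  unsigned size;
  bool operator<(const SketchCandidate& other) const {
    return size < other.size;
  }
};

class SketchScheduler {
 public:
  void Schedule(SketchCandidate c) { queue_.push(c); }
  SketchCandidate GetNext() {
    SketchCandidate top = queue_.top();
    queue_.pop();
    return top;
  }
  bool empty() const { return queue_.empty(); }

 private:
  // std::priority_queue pops the largest element under operator<.
  std::priority_queue<SketchCandidate> queue_;
};

int main() {
  SketchScheduler scheduler;
  scheduler.Schedule({"r1", 3});
  scheduler.Schedule({"group(r2,r3)", 8});
  scheduler.Schedule({"r4", 5});
  while (!scheduler.empty()) {
    SketchCandidate c = scheduler.GetNext();
    std::printf("%s (size %u)\n", c.name.c_str(), c.size);
  }
  // Prints group(r2,r3), r4, r1: the biggest candidates are handled first.
  return 0;
}
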
@@ -85,12 +110,15 @@ class GreedyAllocator final : public RegisterAllocator {
}
Zone* local_zone() const { return local_zone_; }
+ ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
+ const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
// Insert fixed ranges.
void PreallocateFixedRanges();
+ void GroupLiveRanges();
+
// Schedule unassigned live ranges for allocation.
- // TODO(mtrofin): groups.
void ScheduleAllocationCandidates();
void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
@@ -106,6 +134,7 @@ class GreedyAllocator final : public RegisterAllocator {
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
+ void TryAllocateGroup(LiveRangeGroup* group);
bool CanProcessRange(LiveRange* range) const {
return range != nullptr && !range->IsEmpty() && range->kind() == mode();
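
For orientation, TryAllocateCandidate presumably dispatches on the candidate kind, trying a whole group as a unit and handling single ranges separately. The sketch below illustrates only that dispatch shape; the names, the bodies and especially the member-by-member fallback are assumptions, not V8's implementation.

// Sketch only: the dispatch implied by the new TryAllocateGroup /
// TryAllocateLiveRange split. All names and behaviour here are assumptions.
#include <cstdio>
#include <vector>

struct RangeSketch { int id; };
struct GroupSketch { std::vector<RangeSketch*> ranges; };

bool TryAllocateRangeSketch(RangeSketch* range) {
  std::printf("trying single range %d\n", range->id);
  return true;  // Pretend a register was found.
}

bool TryAllocateGroupSketch(GroupSketch* group) {
  std::printf("trying group of %zu ranges as a unit\n", group->ranges.size());
  return false;  // Pretend no single register fits the whole group.
}

// Mirrors the shape of TryAllocateCandidate: a group is tried as a unit;
// if that fails, its members are (assumed to be) retried individually.
void TryAllocateCandidateSketch(GroupSketch* group, RangeSketch* single) {
  if (group != nullptr) {
    if (!TryAllocateGroupSketch(group)) {
      for (RangeSketch* member : group->ranges) {
        TryAllocateRangeSketch(member);
      }
    }
    return;
  }
  TryAllocateRangeSketch(single);
}

int main() {
  RangeSketch r1{1}, r2{2}, r3{3};
  GroupSketch g{{&r1, &r2}};
  TryAllocateCandidateSketch(&g, nullptr);   // Group path.
  TryAllocateCandidateSketch(nullptr, &r3);  // Single-range path.
  return 0;
}
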
@@ -119,12 +148,47 @@ class GreedyAllocator final : public RegisterAllocator {
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range, at the given register.
+ float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
+ float competing_weight) const;
+
+ // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+ // a range conflicting with the given group, at the given register.
float GetMaximumConflictingWeight(unsigned reg_id,
- const LiveRange* range) const;
+ const LiveRangeGroup* group,
+ float group_weight) const;
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);
+ // Find a good position at which to fill, after a range was spilled because of a call.
+ LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
+ int call_index);
+ // Split a range around all calls it passes over. Returns true if any changes
+ // were made, or false if no calls were found.
+ bool TrySplitAroundCalls(LiveRange* range);
+
+ // Find a split position at the outermost loop.
+ LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
+
+ // Finds the first call instruction in the path of this range. Splits the
+ // range before the call, requeues that portion (if any), spills the section
+ // covering the call, and returns the section after the call. The return
+ // value is:
+ // - the same range, if no call was found;
+ // - nullptr, if the range ended at the call and there is no "after the
+ // call" portion;
+ // - otherwise, the portion after the call.
+ LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
+
+ // While we attempt to merge spill ranges later on in the allocation pipeline,
+ // we want to ensure group elements get merged. Waiting until later may hinder
+ // merge-ability, since the pipeline merger (being naive) may create conflicts
+ // between spill ranges of group members.
+ void TryReuseSpillRangesForGroups();
+
+ LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
+
+ bool IsProgressPossible(const LiveRange* range);
+
// Necessary heuristic: spill when all else failed.
void SpillRangeAsLastResort(LiveRange* range);
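
The splitting-around-calls helpers above come with a fairly precise contract, which the toy sketch below mirrors on a simplified model where a range is just a sorted list of instruction indices. ToyRange, the storage vector, and the way the pre-call and spilled portions are simply dropped are all illustrative; V8's real splitting works on LiveRange/LifetimePosition and is considerably more involved.

// Sketch only: models the documented contract of
// GetRemainderAfterSplittingAroundFirstCall / TrySplitAroundCalls on a toy
// representation. Not V8 code.
#include <cstddef>
#include <cstdio>
#include <set>
#include <vector>

struct ToyRange {
  std::vector<int> instructions;  // Sorted instruction indices covered.
};

// Returns:
//  - the input range if it covers no call,
//  - nullptr if nothing remains after the first call,
//  - otherwise a new range holding the portion after that call.
// The portion before the call would be requeued and the call section spilled
// in the real allocator; here we simply drop them.
ToyRange* RemainderAfterFirstCall(ToyRange* range,
                                  const std::set<int>& call_indices,
                                  std::vector<ToyRange>& storage) {
  for (std::size_t i = 0; i < range->instructions.size(); ++i) {
    if (call_indices.count(range->instructions[i]) == 0) continue;
    std::vector<int> after(range->instructions.begin() + i + 1,
                           range->instructions.end());
    if (after.empty()) return nullptr;
    storage.push_back(ToyRange{after});
    return &storage.back();
  }
  return range;  // No call inside this range.
}

// Mirrors the shape of TrySplitAroundCalls: keep peeling off the portion
// after the next call until no calls remain. Returns true if any split
// happened.
bool SplitAroundCalls(ToyRange* range, const std::set<int>& call_indices,
                      std::vector<ToyRange>& storage) {
  bool changed = false;
  while (range != nullptr) {
    ToyRange* rest = RemainderAfterFirstCall(range, call_indices, storage);
    if (rest == range) break;  // No further calls.
    changed = true;
    range = rest;
  }
  return changed;
}

int main() {
  std::vector<ToyRange> storage;
  storage.reserve(8);  // Keep pointers stable for this toy example.
  ToyRange r{{1, 2, 3, 4, 5, 6}};
  bool changed = SplitAroundCalls(&r, /*call_indices=*/{3, 5}, storage);
  std::printf("split happened: %s\n", changed ? "yes" : "no");
  return 0;
}
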
@@ -133,6 +197,8 @@ class GreedyAllocator final : public RegisterAllocator {
Zone* local_zone_;
ZoneVector<CoalescedLiveRanges*> allocations_;
AllocationScheduler scheduler_;
+ ZoneVector<LiveRangeGroup*> groups_;
+
DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};
} // namespace compiler