// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_GREEDY_ALLOCATOR_H_
#define V8_GREEDY_ALLOCATOR_H_

#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// The object of allocation scheduling. At minimum, this is a LiveRange, but
// we may extend this to groups of LiveRanges. It has to be comparable.
class AllocationCandidate {
 public:
  explicit AllocationCandidate(LiveRange* range)
      : is_group_(false), size_(range->GetSize()) {
    candidate_.range_ = range;
  }

  explicit AllocationCandidate(LiveRangeGroup* ranges)
      : is_group_(true), size_(CalculateGroupSize(ranges)) {
    candidate_.group_ = ranges;
  }

  // Strict ordering operators
  bool operator<(const AllocationCandidate& other) const {
    return size() < other.size();
  }
  bool operator>(const AllocationCandidate& other) const {
    return size() > other.size();
  }

  bool is_group() const { return is_group_; }
  LiveRange* live_range() const { return candidate_.range_; }
  LiveRangeGroup* group() const { return candidate_.group_; }

 private:
  unsigned CalculateGroupSize(LiveRangeGroup* group) {
    unsigned ret = 0;
    for (LiveRange* range : group->ranges()) {
      ret += range->GetSize();
    }
    return ret;
  }

  unsigned size() const { return size_; }

  bool is_group_;
  unsigned size_;
  union {
    LiveRange* range_;
    LiveRangeGroup* group_;
  } candidate_;
};
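
// Illustrative note on the ordering (sizes here are hypothetical): a
// candidate's size is GetSize() for a single range and the sum of member
// sizes for a group, so a group of ranges covering 3 and 5 instructions
// (size 8) compares greater than a lone range covering 6. This comparison is
// what the scheduler below keys its priority queue on.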

// Schedule processing (allocating) of AllocationCandidates.
class AllocationScheduler final : ZoneObject {
 public:
  explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
  void Schedule(LiveRange* range);
  void Schedule(LiveRangeGroup* group);
  AllocationCandidate GetNext();
  bool empty() const { return queue_.empty(); }

 private:
  typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
  ScheduleQueue queue_;

  DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
};
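
// A minimal usage sketch, assuming ZonePriorityQueue behaves like
// std::priority_queue (a max-heap over operator<), so GetNext() presumably
// yields the largest remaining candidate first. The zone and ranges named
// below are placeholders, not part of this header:
//
//   AllocationScheduler scheduler(zone);
//   scheduler.Schedule(some_live_range);  // wrapped in a candidate
//   scheduler.Schedule(some_group);       // groups are candidates too
//   while (!scheduler.empty()) {
//     AllocationCandidate c = scheduler.GetNext();
//     // ... allocate c.is_group() ? c.group() : c.live_range() ...
//   }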

// A variant of the LLVM Greedy Register Allocator. See
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
class GreedyAllocator final : public RegisterAllocator {
 public:
  explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
                           Zone* local_zone);

  void AllocateRegisters();

 private:
  static const float kAllocatedRangeMultiplier;

  static void UpdateWeightAtAllocation(LiveRange* range) {
    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
    range->set_weight(range->weight() * kAllocatedRangeMultiplier);
  }

  static void UpdateWeightAtEviction(LiveRange* range) {
    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
    range->set_weight(range->weight() / kAllocatedRangeMultiplier);
  }
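
  // For intuition (the multiplier's actual value is defined in the .cc file;
  // 100 is just an assumed figure): allocating a range of weight 2.0f would
  // raise it to 200.0f, making it expensive for later candidates to evict,
  // and evicting it divides the weight back down to the original 2.0f.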

  AllocationScheduler& scheduler() { return scheduler_; }
  CoalescedLiveRanges* current_allocations(unsigned i) {
    return allocations_[i];
  }
  CoalescedLiveRanges* current_allocations(unsigned i) const {
    return allocations_[i];
  }
  Zone* local_zone() const { return local_zone_; }
  ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
  const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }

  // Insert fixed ranges.
  void PreallocateFixedRanges();

  void GroupLiveRanges();

  // Schedule unassigned live ranges for allocation.
  void ScheduleAllocationCandidates();

  void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
    UpdateWeightAtAllocation(range);
    current_allocations(reg_id)->AllocateRange(range);
  }

  // Evict and reschedule conflicts of a given range, at a given register.
  void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);

  void TryAllocateCandidate(const AllocationCandidate& candidate);
  void TryAllocateLiveRange(LiveRange* range);
  void TryAllocateGroup(LiveRangeGroup* group);

  // Calculate the weight of a candidate for allocation.
  void EnsureValidRangeWeight(LiveRange* range);

  // Calculate the new weight of a range that is about to be allocated.
  float GetAllocatedRangeWeight(float candidate_weight);

  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
  // a range conflicting with the given range, at the given register.
  float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
                                    float competing_weight) const;

  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
  // a range conflicting with the given group, at the given register.
  float GetMaximumConflictingWeight(unsigned reg_id,
                                    const LiveRangeGroup* group,
                                    float group_weight) const;
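
  // An inference from this interface (the precise policy lives in the .cc
  // file): a candidate presumably may evict the conflicts at a register only
  // when its own weight exceeds the maximum conflicting weight reported here;
  // otherwise that register is not a profitable choice for it.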

  // This is the extension point for splitting heuristics.
  void SplitOrSpillBlockedRange(LiveRange* range);

  // Find a good position at which to fill, after a range was spilled after a
  // call.
  LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
                                              int call_index);

  // Split a range around all calls it passes over. Returns true if any changes
  // were made, or false if no calls were found.
  bool TrySplitAroundCalls(LiveRange* range);

  // Find a split position at the outermost loop.
  LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);

  // Finds the first call instruction in the path of this range. Splits before
  // and requeues that segment (if any), spills the section over the call, and
  // returns the section after the call. The return value is:
  // - the same range, if no call was found;
  // - nullptr, if the range ended at the call and there is no "after the
  //   call" portion;
  // - the portion after the call, otherwise.
  LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
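
  // A concrete (hypothetical) picture: for a range live across positions
  // [10, 50) with its first call at 30, the part before the call is split off
  // and rescheduled, the slice covering the call is spilled, and the part
  // after the call, roughly [31, 50), is what gets returned for further
  // allocation attempts.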

  // While we attempt to merge spill ranges later on in the allocation
  // pipeline, we want to ensure group elements get merged. Waiting until
  // later may hinder mergeability, since the pipeline merger (being naive)
  // may create conflicts between spill ranges of group members.
  void TryReuseSpillRangesForGroups();

  LifetimePosition GetLastResortSplitPosition(const LiveRange* range);

  bool IsProgressPossible(const LiveRange* range);

  // Necessary heuristic: spill when all else has failed.
  void SpillRangeAsLastResort(LiveRange* range);

  void AssignRangeToRegister(int reg_id, LiveRange* range);

  Zone* local_zone_;

  ZoneVector<CoalescedLiveRanges*> allocations_;
  AllocationScheduler scheduler_;
  ZoneVector<LiveRangeGroup*> groups_;

  DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_GREEDY_ALLOCATOR_H_