Skip to content

Commit 1aec6e7

Browse files
committed
freeCollectInactiveAny should avoid visiting unnecessary intervals
1 parent 684c823 commit 1aec6e7

File tree

4 files changed

+96
-335
lines changed

4 files changed

+96
-335
lines changed

compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/alloc/lsra/IntervalWalker.java

Lines changed: 5 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -72,10 +72,6 @@ protected boolean activateCurrent(@SuppressWarnings({"unused"}) Interval current
7272
return true;
7373
}
7474

75-
void walk() {
76-
walkTo(Integer.MAX_VALUE);
77-
}
78-
7975
/**
8076
* Creates a new interval walker.
8177
*
@@ -178,7 +174,7 @@ private void walkTo(State state, int from, RegisterBinding binding) {
178174
* @return The next interval or null if there is no {@linkplain #unhandledLists unhandled}
179175
* interval at position {@code toOpId}.
180176
*/
181-
private Interval nextInterval(int toOpId) {
177+
private Interval nextInterval() {
182178
RegisterBinding binding;
183179
Interval any = unhandledLists.any;
184180
Interval fixed = unhandledLists.fixed;
@@ -188,20 +184,13 @@ private Interval nextInterval(int toOpId) {
188184
binding = !fixed.isEndMarker() && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any;
189185

190186
assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!";
191-
assert any.isEndMarker() || fixed.isEndMarker() || any.from() != fixed.from() ||
192-
binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first";
193-
194187
} else if (!fixed.isEndMarker()) {
195188
binding = RegisterBinding.Fixed;
196189
} else {
197190
return null;
198191
}
199192
Interval currentInterval = unhandledLists.get(binding);
200193

201-
if (toOpId < currentInterval.from()) {
202-
return null;
203-
}
204-
205194
currentBinding = binding;
206195
unhandledLists.set(binding, currentInterval.next);
207196
currentInterval.next = allocator.intervalEndMarker;
@@ -217,9 +206,8 @@ private Interval nextInterval(int toOpId) {
217206
* date.
218207
*/
219208
@SuppressWarnings("try")
220-
protected void walkTo(int toOpId) {
221-
assert currentPosition <= toOpId : "can not walk backwards";
222-
for (Interval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) {
209+
void walk() {
210+
for (Interval currentInterval = nextInterval(); currentInterval != null; currentInterval = nextInterval()) {
223211
int opId = currentInterval.from();
224212

225213
// set currentPosition prior to call of walkTo
@@ -242,16 +230,7 @@ protected void walkTo(int toOpId) {
242230
}
243231
}
244232
// set currentPosition prior to call of walkTo
245-
currentPosition = toOpId;
246-
247-
if (currentPosition <= allocator.maxOpId()) {
248-
// update unhandled stack intervals
249-
updateUnhandledStackIntervals(toOpId);
250-
251-
// call walkTo if still in range
252-
walkTo(State.Active, toOpId);
253-
walkTo(State.Inactive, toOpId);
254-
}
233+
currentPosition = Integer.MAX_VALUE;
255234
}
256235

257236
private void intervalMoved(Interval interval, State from, State to) {
@@ -268,7 +247,7 @@ private void intervalMoved(Interval interval, State from, State to) {
268247
* #activeLists active}.
269248
*
270249
* Note that for {@linkplain RegisterBinding#Fixed fixed} and {@linkplain RegisterBinding#Any
271-
* any} intervals this is done in {@link #nextInterval(int)}.
250+
* any} intervals this is done in {@link #nextInterval()}.
272251
*/
273252
private void updateUnhandledStackIntervals(int opId) {
274253
Interval currentInterval = unhandledLists.get(RegisterBinding.Stack);

compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/alloc/lsra/LinearScanRegisterAllocationPhase.java

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,11 @@
2424
*/
2525
package jdk.graal.compiler.lir.alloc.lsra;
2626

27-
import jdk.graal.compiler.lir.gen.LIRGenerationResult;
28-
import jdk.graal.compiler.lir.phases.AllocationPhase;
2927
import org.graalvm.collections.Pair;
30-
import jdk.graal.compiler.debug.Indent;
3128

29+
import jdk.graal.compiler.debug.Indent;
30+
import jdk.graal.compiler.lir.gen.LIRGenerationResult;
31+
import jdk.graal.compiler.lir.phases.AllocationPhase;
3232
import jdk.vm.ci.code.TargetDescription;
3333

3434
public final class LinearScanRegisterAllocationPhase extends LinearScanAllocationPhase {
@@ -57,12 +57,7 @@ void allocateRegisters() {
5757
notPrecoloredIntervals = result.getRight();
5858

5959
// allocate cpu registers
60-
LinearScanWalker lsw;
61-
if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue(allocator.getOptions())) {
62-
lsw = new OptimizingLinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
63-
} else {
64-
lsw = new LinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
65-
}
60+
LinearScanWalker lsw = new LinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
6661
lsw.walk();
6762
lsw.finishAllocation();
6863
}

compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/alloc/lsra/LinearScanWalker.java

Lines changed: 87 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ class LinearScanWalker extends IntervalWalker {
6666
private static final CountingTimerKey allocFreeRegister = countingTimer("allocFreeRegister");
6767
private static final CountingTimerKey activateCurrent = countingTimer("activateCurrent");
6868
private static final CountingTimerKey splitAndSpillIntersectingIntervals = countingTimer("splitAndSpillIntersectingIntervals");
69+
private static final CountingTimerKey allocLockedRegister = countingTimer("allocLockedRegister");
6970

7071
/**
7172
* The number of split operations on intervals generated by this walker. Splits are inserted
@@ -137,6 +138,11 @@ boolean isActiveRegister(Interval interval) {
137138
return isActiveRegister(asRegister(interval.location()).number);
138139
}
139140

141+
/**
142+
* Excludes a given interval from being used by setting its use position to 0 if it is assigned
143+
* to an active register. This method is used to mark a register as blocked, indicating that it
144+
* cannot be used for allocation.
145+
*/
140146
void excludeFromUse(Interval i) {
141147
Value location = i.location();
142148
int i1 = asRegister(location).number;
@@ -146,31 +152,29 @@ void excludeFromUse(Interval i) {
146152
}
147153

148154
void setUsePos(Interval interval, int intersects, boolean onlyProcessUsePos) {
155+
int i = asRegister(interval.location()).number;
156+
assert isActiveRegister(i) : "should have been check";
149157
if (intersects != -1) {
150158
assert intersects != 0 : "must use excludeFromUse to set usePos to 0";
151-
int i = asRegister(interval.location()).number;
152-
if (isActiveRegister(i)) {
153-
if (usePos[i] > intersects) {
154-
usePos[i] = intersects;
155-
}
156-
if (!onlyProcessUsePos) {
157-
interval.spillNext = spillIntervals[i];
158-
spillIntervals[i] = interval;
159-
}
159+
if (usePos[i] > intersects) {
160+
usePos[i] = intersects;
161+
}
162+
if (!onlyProcessUsePos) {
163+
interval.spillNext = spillIntervals[i];
164+
spillIntervals[i] = interval;
160165
}
161166
}
162167
}
163168

164169
private void setUsePosAtIntersection(Interval interval, Interval current) {
165-
if (isActiveRegister(interval)) {
166-
int i = asRegister(interval.location()).number;
167-
int savedUsePos = usePos[i];
168-
int intersects = interval.currentIntersectsAtLimit(current, savedUsePos);
169-
if (intersects != -1) {
170-
assert intersects != 0 : "must use excludeFromUse to set usePos to 0";
171-
if (usePos[i] > intersects) {
172-
usePos[i] = intersects;
173-
}
170+
assert isActiveRegister(interval) : "called should have checked";
171+
int i = asRegister(interval.location()).number;
172+
int savedUsePos = usePos[i];
173+
int intersects = interval.currentIntersectsAtLimit(current, savedUsePos);
174+
if (intersects != -1) {
175+
assert intersects != 0 : "must use excludeFromUse to set usePos to 0";
176+
if (usePos[i] > intersects) {
177+
usePos[i] = intersects;
174178
}
175179
}
176180
}
@@ -214,6 +218,12 @@ void freeExcludeActiveAny() {
214218
}
215219
}
216220

221+
/**
222+
* Collects information about inactive intervals of type {@link RegisterBinding#Fixed} that
223+
* intersect with the given {@code current} interval. The method iterates through the inactive
224+
* intervals, checks if they are assigned to an active register, and updates the use position
225+
* information accordingly.
226+
*/
217227
@SuppressWarnings("try")
218228
void freeCollectInactiveFixed(Interval current) {
219229
try (DebugCloseable t = allocator.start(freeCollectInactiveFixed)) {
@@ -222,7 +232,7 @@ void freeCollectInactiveFixed(Interval current) {
222232
if (isActiveRegister(interval)) {
223233
int currentFrom = interval.currentFrom();
224234
if (current.to() <= currentFrom) {
225-
assert !(interval.currentIntersectsAt(current) != -1) : "must not intersect";
235+
assert interval.currentIntersectsAt(current) == -1 : "must not intersect";
226236
int i = asRegister(interval.location()).number;
227237
if (usePos[i] > currentFrom) {
228238
usePos[i] = currentFrom;
@@ -236,19 +246,44 @@ void freeCollectInactiveFixed(Interval current) {
236246
}
237247
}
238248

249+
/**
250+
* Collects information about inactive intervals of type {@link RegisterBinding#Any} that
251+
* intersect with the given {@code current} interval. The method iterates through the inactive
252+
* intervals, checks if they are assigned to an active register, and updates the use position
253+
* information accordingly. The iteration stops when an interval is encountered whose
254+
* {@linkplain Interval#currentFrom() current from} position exceeds the specified
255+
* {@code limit}. This avoids potentially useless work that can become expensive for larger
256+
* compiles.
257+
*/
239258
@SuppressWarnings("try")
240-
void freeCollectInactiveAny(Interval current) {
259+
void freeCollectInactiveAny(Interval current, int limit) {
241260
try (DebugCloseable t = allocator.start(freeCollectInactiveAny)) {
242261
Interval interval = inactiveLists.get(RegisterBinding.Any);
243262
while (!interval.isEndMarker()) {
244263
if (isActiveRegister(interval)) {
245264
setUsePosAtIntersection(interval, current);
246265
}
266+
if (interval.currentFrom() > limit) {
267+
/*
268+
* All following intervals must be greater than limit as activeLists and
269+
* inactiveLists are sorted by currentFrom. See
270+
* #addToListSortedByCurrentFromPositions.
271+
*/
272+
return;
273+
}
247274
interval = interval.next;
248275
}
249276
}
250277
}
251278

279+
/**
280+
* Excludes all {@linkplain RegisterBinding#Fixed fixed} intervals currently
281+
* {@linkplain State#Active active} from being used. This is done by iterating over the
282+
* {@linkplain #activeLists active} intervals with a {@linkplain RegisterBinding#Fixed fixed}
283+
* binding and calling {@link #excludeFromUse(Interval)} on each one.
284+
*
285+
* @see #excludeFromUse(Interval)
286+
*/
252287
@SuppressWarnings("try")
253288
void spillExcludeActiveFixed() {
254289
try (DebugCloseable t = allocator.start(spillExcludeActiveFixed)) {
@@ -268,7 +303,7 @@ void spillBlockInactiveFixed(Interval current) {
268303
if (current.to() > interval.currentFrom()) {
269304
setBlockPos(interval, current);
270305
} else {
271-
assert !(interval.currentIntersectsAt(current) != -1) : "invalid optimization: intervals intersect";
306+
assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect";
272307
}
273308

274309
interval = interval.next;
@@ -801,18 +836,41 @@ boolean allocFreeRegister(Interval interval) {
801836
DebugContext debug = allocator.getDebug();
802837
try (Indent indent = debug.logAndIndent("trying to find free register for %s", interval); DebugCloseable t = allocator.start(allocFreeRegister)) {
803838

839+
/*
840+
* When allocating a partial register, we want it to be free at least until this
841+
* position. If the interval has already been split, we add 2 as a heuristic to only
842+
* consider registers free for at least two instructions. This is meant to avoid
843+
* shuffling data between registers after every single instruction. If register pressure
844+
* is so high that we would only get a partial register for the duration of one
845+
* instruction and then have to split again, prefer not choosing such a register at all.
846+
* We will spill instead.
847+
*/
848+
int partialRegRequestedUntil = interval.from() + 1 + (interval.isSplitChild() ? 2 : 0);
849+
int intervalTo = interval.to();
850+
804851
initUseLists(true);
852+
/* Mark the active registers as busy */
805853
freeExcludeActiveFixed();
806854
freeExcludeActiveAny();
855+
/*
856+
* Compute the earliest use position of inactive intervals that intersect with this
857+
* interval. These are intervals that might become active in the future.
858+
*/
807859
freeCollectInactiveFixed(interval);
808-
freeCollectInactiveAny(interval);
809-
// freeCollectUnhandled(fixedKind, cur);
860+
/*
861+
* The inactive any list can in some cases be very long which can make this step
862+
* expensive, so pass in the minimum usePos required to satisfy this interval.
863+
*/
864+
freeCollectInactiveAny(interval, Math.max(partialRegRequestedUntil, intervalTo));
865+
810866
assert unhandledLists.get(RegisterBinding.Fixed).isEndMarker() : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";
811867

812-
// usePos contains the start of the next interval that has this register assigned
813-
// (either as a fixed register or a normal allocated register in the past)
814-
// only intervals overlapping with cur are processed, non-overlapping invervals can be
815-
// ignored safely
868+
/*
869+
* usePos contains the start of the next interval that has this register assigned
870+
* (either as a fixed register or a normal allocated register in the past) only
871+
* intervals overlapping with cur are processed, non-overlapping intervals can be
872+
* ignored safely
873+
*/
816874
if (debug.isLogEnabled()) {
817875
// Enable this logging to see all register states
818876
try (Indent indent2 = debug.logAndIndent("state of registers:")) {
@@ -833,18 +891,6 @@ boolean allocFreeRegister(Interval interval) {
833891
}
834892
assert interval.location() == null : "register already assigned to interval";
835893

836-
/*
837-
* When allocating a partial register, we want it to be free at least until this
838-
* position. If the interval has already been split, we add 2 as a heuristic to only
839-
* consider registers free for at least two instructions. This is meant to avoid
840-
* shuffling data between registers after every single instruction. If register pressure
841-
* is so high that we would only get a partial register for the duration of one
842-
* instruction and then have to split again, prefer not choosing such a register at all.
843-
* We will spill instead.
844-
*/
845-
int partialRegRequestedUntil = interval.from() + 1 + (interval.isSplitChild() ? 2 : 0);
846-
int intervalTo = interval.to();
847-
848894
boolean needSplit = false;
849895
int splitPos = -1;
850896

@@ -907,7 +953,8 @@ void splitAndSpillIntersectingIntervals(Register reg) {
907953
@SuppressWarnings("try")
908954
void allocLockedRegister(Interval interval) {
909955
DebugContext debug = allocator.getDebug();
910-
try (Indent indent = debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) {
956+
try (Indent indent = debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval);
957+
DebugCloseable t = allocator.start(allocLockedRegister)) {
911958

912959
// the register must be free at least until this position
913960
int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister);

0 commit comments

Comments (0)