Skip to content

Commit f84f4e1

Browse files
committed
[LV] Don't crash on inner loops with RT checks in VPlan-native path.
Assert in emitMemRuntimeChecks that runtime checks are only generated for inner loops, instead of returning nullptr in the VPlan-native path, which was causing crashes and incorrect code.
1 parent f9146cc commit f84f4e1

File tree

2 files changed: +161 −4 lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2616,10 +2616,6 @@ BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
26162616
}
26172617

26182618
BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
2619-
// VPlan-native path does not do any analysis for runtime checks currently.
2620-
if (EnableVPlanNativePath)
2621-
return nullptr;
2622-
26232619
BasicBlock *const MemCheckBlock =
26242620
RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);
26252621

@@ -2629,6 +2625,10 @@ BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
26292625
if (!MemCheckBlock)
26302626
return nullptr;
26312627

2628+
// VPlan-native path does not do any analysis for runtime checks currently.
2629+
assert((!EnableVPlanNativePath || OrigLoop->begin() == OrigLoop->end()) &&
2630+
"Runtime checks are not supported for outer loops yet");
2631+
26322632
if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
26332633
assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
26342634
"Cannot emit memory checks when optimizing for size, unless forced "
Lines changed: 157 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,157 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -enable-vplan-native-path -force-vector-width=4 -S %s | FileCheck %s

target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32"

; Regression test for the VPlan-native path: an inner loop that requires
; SCEV and memory runtime checks must be vectorized without crashing and
; with the checks emitted before the vector loop.
define void @expand(ptr %src, ptr %dst, i64 %0) {
; CHECK-LABEL: define void @expand(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]], i64 [[TMP0:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 1
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP1]], i64 1000)
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[SMAX]], -1
; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], [[TMP0]]
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[TMP0]], 4
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[TMP4]], 8
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP5]]
; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 8
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], 1
; CHECK-NEXT: [[SMAX6:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP6]], i64 1000)
; CHECK-NEXT: [[TMP7:%.*]] = shl i64 [[SMAX6]], 4
; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP7]]
; CHECK-NEXT: [[SMAX8:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP6]], i64 1000)
; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[SMAX8]], [[TMP0]]
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
; CHECK: [[OUTER_HEADER]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[OUTER_IV_NEXT:%.*]], %[[OUTER_LATCH:.*]] ]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP8]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP3]])
; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
; CHECK-NEXT: [[TMP9:%.*]] = sub i64 0, [[MUL_RESULT]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp ult ptr [[TMP10]], [[SCEVGEP]]
; CHECK-NEXT: [[TMP12:%.*]] = or i1 [[TMP11]], [[MUL_OVERFLOW]]
; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP3]])
; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[MUL_RESULT3]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp ult ptr [[TMP14]], [[SCEVGEP1]]
; CHECK-NEXT: [[TMP16:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; CHECK-NEXT: [[TMP17:%.*]] = or i1 [[TMP12]], [[TMP16]]
; CHECK-NEXT: br i1 [[TMP17]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP7]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP5]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP8]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP8]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[DOTSPLAT]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[SRC]], align 8, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; CHECK-NEXT: [[TMP20:%.*]] = shl <4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i64> [[TMP20]], i32 0
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i64> [[TMP20]], i32 1
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = extractelement <4 x i64> [[TMP20]], i32 2
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i64> [[TMP20]], i32 3
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP27]]
; CHECK-NEXT: store double [[TMP19]], ptr [[TMP22]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: store double [[TMP19]], ptr [[TMP24]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: store double [[TMP19]], ptr [[TMP26]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: store double [[TMP19]], ptr [[TMP28]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: [[TMP29:%.*]] = or disjoint <4 x i64> [[TMP20]], splat (i64 1)
; CHECK-NEXT: [[TMP30:%.*]] = extractelement <4 x i64> [[TMP29]], i32 0
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP30]]
; CHECK-NEXT: [[TMP32:%.*]] = extractelement <4 x i64> [[TMP29]], i32 1
; CHECK-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP32]]
; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i64> [[TMP29]], i32 2
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP34]]
; CHECK-NEXT: [[TMP36:%.*]] = extractelement <4 x i64> [[TMP29]], i32 3
; CHECK-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP36]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP31]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP33]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP35]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP37]], align 8, !alias.scope [[META3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP38:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP38]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[OUTER_LATCH]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP18]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[OUTER_HEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[INNER:.*]]
; CHECK: [[INNER]]:
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INNER_IV_NEXT:%.*]], %[[INNER]] ]
; CHECK-NEXT: [[INNER_IV_NEXT]] = add i64 [[INNER_IV]], 1
; CHECK-NEXT: [[L:%.*]] = load double, ptr [[SRC]], align 8
; CHECK-NEXT: [[TMP39:%.*]] = shl i64 [[INNER_IV]], 1
; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP39]]
; CHECK-NEXT: store double [[L]], ptr [[ARRAYIDX24]], align 8
; CHECK-NEXT: [[TMP40:%.*]] = or disjoint i64 [[TMP39]], 1
; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP40]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[ARRAYIDX29]], align 8
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i64 [[INNER_IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[CMP2]], label %[[INNER]], label %[[OUTER_LATCH]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[OUTER_LATCH]]:
; CHECK-NEXT: [[OUTER_IV_NEXT]] = add i64 [[OUTER_IV]], 1
; CHECK-NEXT: [[OUTER_EC:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], 100
; CHECK-NEXT: br i1 [[OUTER_EC]], label %[[EXIT:.*]], label %[[OUTER_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ]
  br label %inner

inner:                                            ; preds = %inner, %outer.header
  %inner.iv = phi i64 [ %0, %outer.header ], [ %inner.iv.next, %inner ]
  %inner.iv.next = add i64 %inner.iv, 1
  %l = load double, ptr %src, align 8
  %2 = shl i64 %inner.iv, 1
  %arrayidx24 = getelementptr double, ptr %dst, i64 %2
  store double %l, ptr %arrayidx24, align 8
  %3 = or disjoint i64 %2, 1
  %arrayidx29 = getelementptr double, ptr %dst, i64 %3
  store double 0.000000e+00, ptr %arrayidx29, align 8
  %cmp2 = icmp slt i64 %inner.iv.next, 1000
  br i1 %cmp2, label %inner, label %outer.latch

outer.latch:
  %outer.iv.next = add i64 %outer.iv, 1
  %outer.ec = icmp eq i64 %outer.iv.next, 100
  br i1 %outer.ec, label %exit, label %outer.header

exit:
  ret void
}
;.
; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; CHECK: [[META3]] = !{[[META4:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]}
;.

0 commit comments

Comments
 (0)