-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck %s

define void @test(ptr %A, i32 %x) {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
-; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[X:%.*]], 1
-; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[X]], 1
+; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
; CHECK-NEXT: store <4 x float> [[WIDE_LOAD]], ptr [[TMP9]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br i1 undef, label [[EXIT:%.*]], label [[SCALAR_PH]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[T_IV_NEXT:%.*]] = trunc i64 [[IV_NEXT]] to i32
; CHECK-NEXT: [[MUL_IV_NEXT:%.*]] = mul i32 [[T_IV_NEXT]], [[X]]
@@ -42,31 +43,113 @@ define void @test(ptr %A, i32 %x) {
; CHECK-NEXT: [[IDX_2:%.*]] = zext i32 [[MUL_IV]] to i64
; CHECK-NEXT: [[ARRAYIDX1209:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IDX_2]]
; CHECK-NEXT: store float [[LV]], ptr [[ARRAYIDX1209]], align 4
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], undef
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: exit:
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop

-loop: ; preds = %loop, %entry
+loop:
  %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
  %iv.next = add nuw nsw i64 %iv, 1
  %t.iv.next = trunc i64 %iv.next to i32
  %mul.iv.next = mul i32 %t.iv.next, %x
  %idx.1 = zext i32 %mul.iv.next to i64
  %arrayidx1215 = getelementptr inbounds float, ptr %A, i64 %idx.1
  %lv = load float, ptr %arrayidx1215, align 4
-
  %t.iv = trunc i64 %iv to i32
  %mul.iv = mul i32 %t.iv, %x
  %idx.2 = zext i32 %mul.iv to i64
  %arrayidx1209 = getelementptr inbounds float, ptr %A, i64 %idx.2
  store float %lv, ptr %arrayidx1209, align 4
-  %ec = icmp eq i64 %iv.next, undef
+  %ec = icmp eq i64 %iv.next, 1000
  br i1 %ec, label %exit, label %loop

-exit: ; preds = %loop
+exit:
+  ret void
+}
+
+; FIXME: !llvm.loop.unroll.runtime.disable metadata should be added to the
+; scalar loop, as there are no runtime checks needed (or they can be proven
+; false).
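+;
+; Illustrative sketch only (not output checked by this test; the !0/!1 node
+; numbers are placeholders): with that metadata attached, the scalar loop's
+; backedge branch would look roughly like
+;   br i1 %cmp.not.i.i.i.i, label %exit, label %loop, !llvm.loop !0
+; with
+;   !0 = distinct !{!0, !1}
+;   !1 = !{!"llvm.loop.unroll.runtime.disable"}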
+define void @diff_memcheck_known_false_for_vf_4(ptr %B, ptr %A, ptr %end) {
+; CHECK-LABEL: define void @diff_memcheck_known_false_for_vf_4(
+; CHECK-SAME: ptr [[B:%.*]], ptr [[A:%.*]], ptr [[END:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64
+; CHECK-NEXT: [[A_INT:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[B_CAST:%.*]] = ptrtoint ptr [[B]] to i64
+; CHECK-NEXT: [[PTR_SUB:%.*]] = sub i64 [[A_INT]], [[B_CAST]]
+; CHECK-NEXT: [[ADD_PTR11:%.*]] = getelementptr i8, ptr [[B]], i64 [[PTR_SUB]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[A_INT]], [[END1]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], -8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], -8
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[ADD_PTR11]], i64 [[TMP5]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], -8
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i32 -3
+; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ [[ADD_PTR11]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV_1:%.*]] = phi ptr [ [[IV_1_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV_2:%.*]] = phi ptr [ [[IV_2_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV_2_NEXT]] = getelementptr nusw i8, ptr [[IV_2]], i64 -8
+; CHECK-NEXT: [[IV_1_NEXT]] = getelementptr i8, ptr [[IV_1]], i64 -8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[IV_2_NEXT]], align 8
+; CHECK-NEXT: store i64 0, ptr [[IV_1]], align 8
+; CHECK-NEXT: [[CMP_NOT_I_I_I_I:%.*]] = icmp eq ptr [[END]], [[IV_2]]
+; CHECK-NEXT: br i1 [[CMP_NOT_I_I_I_I]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+  %A.int = ptrtoint ptr %A to i64
+  %B.cast = ptrtoint ptr %B to i64
+  %ptr.sub = sub i64 %A.int, %B.cast
+  %add.ptr11 = getelementptr i8, ptr %B, i64 %ptr.sub
+  br label %loop
+
+loop:
+  %iv.1 = phi ptr [ %iv.1.next, %loop ], [ %A, %entry ]
+  %iv.2 = phi ptr [ %iv.2.next, %loop ], [ %add.ptr11, %entry ]
+  %iv.2.next = getelementptr nusw i8, ptr %iv.2, i64 -8
+  %iv.1.next = getelementptr i8, ptr %iv.1, i64 -8
+  %2 = load i64, ptr %iv.2.next, align 8
+  store i64 0, ptr %iv.1, align 8
+  %cmp.not.i.i.i.i = icmp eq ptr %end, %iv.2
+  br i1 %cmp.not.i.i.i.i, label %exit, label %loop
+
+exit:
  ret void
}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
+;.