
Commit 10a6fd7

[LV] Regenerate checks for test (NFC).
Auto-generate the check lines for scalable-loop-unpredicated-body-scalar-tail.ll, and simplify the input so the generated checks stay compact without loss of generality.
1 parent e861e49 · commit 10a6fd7
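For reference, a test carrying a NOTE/UTC_ARGS header like the one added here is refreshed by re-running the update_test_checks.py utility over the file. A minimal sketch of that invocation, assuming a built opt is on PATH and the usual in-tree script location (both illustrative; the flags match the UTC_ARGS recorded in the test):

    llvm/utils/update_test_checks.py --opt-binary=opt \
        --check-globals none --filter-out-after "scalar.ph\:" --version 5 \
        llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll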

File tree

1 file changed: +81, -77 lines

llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll

Lines changed: 81 additions & 77 deletions
@@ -1,87 +1,91 @@
-; RUN: opt -S -passes=loop-vectorize,instcombine -force-vector-interleave=1 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF1
-; RUN: opt -S -passes=loop-vectorize,instcombine -force-vector-interleave=2 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF2
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
+; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF1
+; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=2 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF2

-; CHECKUF1: for.body.preheader:
-; CHECKUF1-DAG: %wide.trip.count = zext nneg i32 %N to i64
-; CHECKUF1-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF1-DAG: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
-; CHECKUF1-DAG: %min.iters.check = icmp ugt i64 %[[VSCALEX4]], %wide.trip.count
-
-; CHECKUF1: vector.ph:
-; CHECKUF1-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF1-DAG: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
-; CHECKUF1-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX4]]
-; CHECKUF1: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-
-; CHECKUF1: vector.body:
-; CHECKUF1: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECKUF1: %[[IDXB:.*]] = getelementptr inbounds double, ptr %b, i64 %index
-; CHECKUF1: %wide.load = load <vscale x 4 x double>, ptr %[[IDXB]], align 8
-; CHECKUF1: %[[FADD:.*]] = fadd <vscale x 4 x double> %wide.load, splat (double 1.000000e+00)
-; CHECKUF1: %[[IDXA:.*]] = getelementptr inbounds double, ptr %a, i64 %index
-; CHECKUF1: store <vscale x 4 x double> %[[FADD]], ptr %[[IDXA]], align 8
-; CHECKUF1: %index.next = add nuw i64 %index, %[[VSCALEX4]]
-; CHECKUF1: %[[CMP:.*]] = icmp eq i64 %index.next, %n.vec
-; CHECKUF1: br i1 %[[CMP]], label %middle.block, label %vector.body, !llvm.loop !0
-
-
-; For an interleave factor of 2, vscale is scaled by 8 instead of 4 (and thus shifted left by 3 instead of 2).
+; For an interleave factor of 2, vscale is scaled by 8 instead of 4.
 ; There is also the increment for the next iteration, e.g. instead of indexing IDXB, it indexes at IDXB + vscale * 4.
-
-; CHECKUF2: for.body.preheader:
-; CHECKUF2-DAG: %wide.trip.count = zext nneg i32 %N to i64
-; CHECKUF2-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2-DAG: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
-; CHECKUF2-DAG: %min.iters.check = icmp ugt i64 %[[VSCALEX8]], %wide.trip.count
-
-; CHECKUF2: vector.ph:
-; CHECKUF2-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2-DAG: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
-; CHECKUF2-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX8]]
-; CHECKUF2: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-
-; CHECKUF2: vector.body:
-; CHECKUF2: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECKUF2: %[[IDXB:.*]] = getelementptr inbounds double, ptr %b, i64 %index
-; CHECKUF2: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2: %[[VSCALE2:.*]] = shl i64 %[[VSCALE]], 5
-; CHECKUF2: %[[IDXB_NEXT:.*]] = getelementptr inbounds i8, ptr %[[IDXB]], i64 %[[VSCALE2]]
-; CHECKUF2: %wide.load = load <vscale x 4 x double>, ptr %[[IDXB]], align 8
-; CHECKUF2: %wide.load{{[0-9]+}} = load <vscale x 4 x double>, ptr %[[IDXB_NEXT]], align 8
-; CHECKUF2: %[[FADD:.*]] = fadd <vscale x 4 x double> %wide.load, splat (double 1.000000e+00)
-; CHECKUF2: %[[FADD_NEXT:.*]] = fadd <vscale x 4 x double> %wide.load{{[0-9]+}}, splat (double 1.000000e+00)
-; CHECKUF2: %[[IDXA:.*]] = getelementptr inbounds double, ptr %a, i64 %index
-; CHECKUF2: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2: %[[VSCALE2:.*]] = shl i64 %[[VSCALE]], 5
-; CHECKUF2: %[[IDXA_NEXT:.*]] = getelementptr inbounds i8, ptr %[[IDXA]], i64 %[[VSCALE2]]
-; CHECKUF2: store <vscale x 4 x double> %[[FADD]], ptr %[[IDXA]], align 8
-; CHECKUF2: store <vscale x 4 x double> %[[FADD_NEXT]], ptr %[[IDXA_NEXT]], align 8
-; CHECKUF2: %index.next = add nuw i64 %index, %[[VSCALEX8]]
-; CHECKUF2: %[[CMP:.*]] = icmp eq i64 %index.next, %n.vec
-; CHECKUF2: br i1 %[[CMP]], label %middle.block, label %vector.body, !llvm.loop !0
-
-define void @loop(i32 %N, ptr nocapture %a, ptr nocapture readonly %b) {
+define void @loop(i64 %N, ptr noalias %a, ptr noalias %b) {
+; CHECKUF1-LABEL: define void @loop(
+; CHECKUF1-SAME: i64 [[N:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; CHECKUF1-NEXT: [[ENTRY:.*:]]
+; CHECKUF1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECKUF1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECKUF1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECKUF1: [[VECTOR_PH]]:
+; CHECKUF1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
+; CHECKUF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; CHECKUF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECKUF1-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECKUF1: [[VECTOR_BODY]]:
+; CHECKUF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECKUF1-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDEX]]
+; CHECKUF1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP7]], align 8
+; CHECKUF1-NEXT: [[TMP8:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
+; CHECKUF1-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; CHECKUF1-NEXT: store <vscale x 4 x double> [[TMP8]], ptr [[TMP9]], align 8
+; CHECKUF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECKUF1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECKUF1-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECKUF1: [[MIDDLE_BLOCK]]:
+; CHECKUF1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECKUF1-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECKUF1: [[SCALAR_PH]]:
+;
+; CHECKUF2-LABEL: define void @loop(
+; CHECKUF2-SAME: i64 [[N:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; CHECKUF2-NEXT: [[ENTRY:.*:]]
+; CHECKUF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECKUF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECKUF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECKUF2: [[VECTOR_PH]]:
+; CHECKUF2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECKUF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; CHECKUF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECKUF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECKUF2: [[VECTOR_BODY]]:
+; CHECKUF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECKUF2-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDEX]]
+; CHECKUF2-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP8]], 4
+; CHECKUF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP7]], i64 [[TMP16]]
+; CHECKUF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP7]], align 8
+; CHECKUF2-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x double>, ptr [[TMP9]], align 8
+; CHECKUF2-NEXT: [[TMP10:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
+; CHECKUF2-NEXT: [[TMP11:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD3]], splat (double 1.000000e+00)
+; CHECKUF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; CHECKUF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP13]], 4
+; CHECKUF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[TMP12]], i64 [[TMP17]]
+; CHECKUF2-NEXT: store <vscale x 4 x double> [[TMP10]], ptr [[TMP12]], align 8
+; CHECKUF2-NEXT: store <vscale x 4 x double> [[TMP11]], ptr [[TMP14]], align 8
+; CHECKUF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECKUF2-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECKUF2-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECKUF2: [[MIDDLE_BLOCK]]:
+; CHECKUF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECKUF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECKUF2: [[SCALAR_PH]]:
+;
 entry:
-%cmp7 = icmp sgt i32 %N, 0
-br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
-
-for.body.preheader: ; preds = %entry
-%wide.trip.count = zext i32 %N to i64
-br label %for.body
-
-for.cond.cleanup: ; preds = %for.body, %entry
-ret void
+br label %loop

-for.body: ; preds = %for.body.preheader, %for.body
-%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
-%arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
+loop:
+%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+%arrayidx = getelementptr inbounds double, ptr %b, i64 %iv
 %0 = load double, ptr %arrayidx, align 8
 %add = fadd double %0, 1.000000e+00
-%arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+%arrayidx2 = getelementptr inbounds double, ptr %a, i64 %iv
 store double %add, ptr %arrayidx2, align 8
-%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-%exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
-br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !1
+%iv.next = add nuw nsw i64 %iv, 1
+%ec = icmp eq i64 %iv.next, %N
+br i1 %ec, label %exit, label %loop, !llvm.loop !1
+
+exit:
+ret void
 }

 !1 = distinct !{!1, !2}
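To make the comment about the interleave factor concrete, this is the step arithmetic implied by the generated checks (VF = 4 comes from -force-vector-width=4; the byte count assumes 8-byte doubles, matching the old shift-by-5 check):

    UF=1: induction step per vector iteration = vscale * 4 elements
    UF=2: induction step per vector iteration = vscale * 8 elements
          offset of the second part's pointer = vscale * 4 doubles = vscale * 32 bytes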
