; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-target-supports-scalable-vectors -S < %s | FileCheck %s

; vscale is not guaranteed to be a power of two, so this test (which
; deliberately doesn't correspond to an in-tree backend, since those
; *do* have vscale as a power of two) exercises the code required for the
; minimum iteration check in the non-power-of-two case.
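
; The minimum iteration check below is effectively an overflow guard: with a
; trip count of 256, the -257 in the compare is UINT64_MAX - 256, so the loop
; falls back to the scalar path whenever (UINT64_MAX - trip count) is less
; than the runtime step (vscale x 4 here), i.e. whenever stepping the
; induction variable by a non-power-of-two VF could wrap it past the end.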
define void @foo(i32 %val, ptr dereferenceable(1024) %ptr) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i64 -257, [[TMP7]]
; CHECK-NEXT:    br i1 [[TMP8]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
; CHECK-NEXT:    [[TMP2:%.*]] = sub i64 [[TMP1]], 1
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 256, [[TMP2]]
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 256)
; CHECK-NEXT:    [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br label [[WHILE_END_LOOPEXIT:%.*]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[WHILE_BODY:%.*]]
; CHECK:       while.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD1:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
; CHECK-NEXT:    [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       while.end.loopexit:
; CHECK-NEXT:    ret void
;
entry:
  br label %while.body

while.body:                                       ; preds = %while.body, %entry
  %index = phi i64 [ %index.next, %while.body ], [ 0, %entry ]
  %gep = getelementptr i32, ptr %ptr, i64 %index
  %ld1 = load i32, ptr %gep, align 4
  %index.next = add nsw i64 %index, 1
  %cmp10 = icmp ult i64 %index.next, 256
  br i1 %cmp10, label %while.body, label %while.end.loopexit, !llvm.loop !0

while.end.loopexit:                               ; preds = %while.body
  ret void
}

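; Loop hints: request predicated, scalable vectorization with width 4 and no
; interleaving, giving the VF of vscale x 4 checked for above.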
!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.interleave.count", i32 1}
!4 = !{!"llvm.loop.vectorize.width", i32 4}