; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize,dce,instcombine -mtriple aarch64-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -S %s | FileCheck %s
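; -force-target-instruction-cost=1 assigns a unit cost to every instruction,
; so the check lines below should not depend on the details of the SVE cost
; model.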

; Test case with strided accesses (constant 80-byte stride).

; void constant_stride_i64(int64_t* a, int64_t* b, int64_t n) {
;   for (int64_t i = 0; i < n; i++) {
;     a[i] = b[i * 10] + 1;
;   }
; }
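
; Here the 80-byte stride is already explicit in the scalar IR (the index is
; pre-multiplied by 80 and fed to an i8-typed GEP), so the vectorizer can form
; the gather addresses directly from a stepvector multiplied by 80.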

define void @constant_stride_i64(ptr noalias nocapture writeonly %a, ptr noalias nocapture readonly %b, i64 %n) #0 {
; CHECK-LABEL: @constant_stride_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 80)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw nsw i64 [[INDEX]], 80
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[B:%.*]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], <vscale x 4 x i64> [[TMP5]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 8, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i64> poison)
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], splat (i64 1)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i64, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i64> [[TMP9]], ptr [[TMP10]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx.idx = mul nuw nsw i64 %indvars.iv, 80
  %arrayidx = getelementptr inbounds nuw i8, ptr %b, i64 %arrayidx.idx
  %0 = load i64, ptr %arrayidx, align 8
  %add = add nsw i64 %0, 1
  %arrayidx2 = getelementptr inbounds nuw i64, ptr %a, i64 %indvars.iv
  store i64 %add, ptr %arrayidx2, align 8
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup:                                 ; preds = %for.body
  ret void
}

; Test case with a constant stride of 10 x i64 elements, which requires
; scaling by the element size to obtain the byte stride.

; void constant_stride_i64_scaled(int64_t* a, int64_t* b, int64_t n) {
;   for (int64_t i = 0; i < n; i++) {
;     a[i] = b[i * 10] + 1;
;   }
; }
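
; Unlike the first test, the scalar IR here indexes an i64-typed GEP with
; i * 10, so the stride must be multiplied by the 8-byte element size to
; arrive at the same 80-byte gather offsets as above.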

define void @constant_stride_i64_scaled(ptr noalias nocapture writeonly %a, ptr noalias nocapture readonly %b, i64 %n) #0 {
; CHECK-LABEL: @constant_stride_i64_scaled(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 80)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[IDX:%.*]] = mul i64 [[INDEX]], 80
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[IDX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], <vscale x 4 x i64> [[TMP5]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[TMP7]], i32 8, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i64> poison)
; CHECK-NEXT: [[TMP8:%.*]] = add nsw <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], splat (i64 1)
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i64> [[TMP8]], ptr [[TMP9]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx.idx = mul nuw nsw i64 %indvars.iv, 10
  %arrayidx = getelementptr i64, ptr %b, i64 %arrayidx.idx
  %0 = load i64, ptr %arrayidx, align 8
  %add = add nsw i64 %0, 1
  %arrayidx2 = getelementptr inbounds nuw i64, ptr %a, i64 %indvars.iv
  store i64 %add, ptr %arrayidx2, align 8
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup:                                 ; preds = %for.body
  ret void
}

attributes #0 = { vscale_range(1,16) }
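
; The !llvm.loop metadata below requests a scalable VF of 4 (i.e.
; <vscale x 4 x i64>) with an interleave count of 1, so both loops are
; vectorized the same way independent of cost-based VF selection.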

!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 1}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}