; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mcpu=grace -passes=loop-vectorize -mtriple=aarch64 < %s | FileCheck %s
target triple = "aarch64"

; Check that a partial reduction is reverted to a regular reduction, so that
; we compare "the VPlan with the best kind of reduction for <range>"
; vs "the VPlan with the best kind of reduction for <other range>".

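; A plausible C equivalent of the IR below (a hypothetical reconstruction for
; readability; the names p, n and sum are illustrative, not from the original
; source):
;
;   long foo(const int *p, int n) {
;     long sum = 0;
;     for (int i = 0; i < n; ++i)
;       sum += p[i]; // i32 element sign-extended into the i64 accumulator
;     return sum;
;   }
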
; Function Attrs: nofree norecurse nosync nounwind memory(argmem: read) uwtable vscale_range(1,16)
define dso_local i64 @foo(ptr noundef readonly captures(none) %0, i32 noundef %1) local_unnamed_addr #0 {
; CHECK-LABEL: define dso_local i64 @foo(
; CHECK-SAME: ptr noundef readonly captures(none) [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[TMP1]], 0
; CHECK-NEXT: br i1 [[TMP3]], label %[[ITER_CHECK:.*]], label %[[BB27:.*]]
; CHECK: [[ITER_CHECK]]:
; CHECK-NEXT: [[TMP4:%.*]] = zext nneg i32 [[TMP1]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP4]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 16
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP13:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP14:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP16:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i32 4
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i32 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i32 12
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP10:%.*]] = sext <4 x i32> [[WIDE_LOAD5]] to <4 x i64>
; CHECK-NEXT: [[TMP11:%.*]] = sext <4 x i32> [[WIDE_LOAD6]] to <4 x i64>
; CHECK-NEXT: [[TMP12:%.*]] = sext <4 x i32> [[WIDE_LOAD7]] to <4 x i64>
; CHECK-NEXT: [[TMP13]] = add <4 x i64> [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[TMP14]] = add <4 x i64> [[VEC_PHI2]], [[TMP10]]
; CHECK-NEXT: [[TMP15]] = add <4 x i64> [[VEC_PHI3]], [[TMP11]]
; CHECK-NEXT: [[TMP16]] = add <4 x i64> [[VEC_PHI4]], [[TMP12]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i64> [[TMP14]], [[TMP13]]
; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i64> [[TMP15]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX9:%.*]] = add <4 x i64> [[TMP16]], [[BIN_RDX8]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[BIN_RDX9]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[BB25:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
; CHECK: [[VEC_EPILOG_PH]]:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[N_MOD_VF10:%.*]] = urem i64 [[TMP4]], 4
; CHECK-NEXT: [[N_VEC11:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF10]]
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT15:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI13:%.*]] = phi <4 x i64> [ [[TMP19]], %[[VEC_EPILOG_PH]] ], [ [[TMP22:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[INDEX12]]
; CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x i32>, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP21:%.*]] = sext <4 x i32> [[WIDE_LOAD14]] to <4 x i64>
; CHECK-NEXT: [[TMP22]] = add <4 x i64> [[VEC_PHI13]], [[TMP21]]
; CHECK-NEXT: [[INDEX_NEXT15]] = add nuw i64 [[INDEX12]], 4
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT15]], [[N_VEC11]]
; CHECK-NEXT: br i1 [[TMP23]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP22]])
; CHECK-NEXT: [[CMP_N16:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC11]]
; CHECK-NEXT: br i1 [[CMP_N16]], label %[[BB25]], label %[[VEC_EPILOG_SCALAR_PH]]
; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX17:%.*]] = phi i64 [ [[TMP24]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
; CHECK-NEXT: br label %[[BB29:.*]]
; CHECK: [[BB25]]:
; CHECK-NEXT: [[TMP26:%.*]] = phi i64 [ [[TMP35:%.*]], %[[BB29]] ], [ [[TMP18]], %[[MIDDLE_BLOCK]] ], [ [[TMP24]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label %[[BB27]]
; CHECK: [[BB27]]:
; CHECK-NEXT: [[TMP28:%.*]] = phi i64 [ 0, [[TMP2:%.*]] ], [ [[TMP26]], %[[BB25]] ]
; CHECK-NEXT: ret i64 [[TMP28]]
; CHECK: [[BB29]]:
; CHECK-NEXT: [[TMP30:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[TMP36:%.*]], %[[BB29]] ]
; CHECK-NEXT: [[TMP31:%.*]] = phi i64 [ [[BC_MERGE_RDX17]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[TMP35]], %[[BB29]] ]
; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[TMP30]]
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35]] = add i64 [[TMP31]], [[TMP34]]
; CHECK-NEXT: [[TMP36]] = add nuw nsw i64 [[TMP30]], 1
; CHECK-NEXT: [[TMP37:%.*]] = icmp eq i64 [[TMP36]], [[TMP4]]
; CHECK-NEXT: br i1 [[TMP37]], label %[[BB25]], label %[[BB29]], !llvm.loop [[LOOP5:![0-9]+]]
;
  %3 = icmp sgt i32 %1, 0
  br i1 %3, label %4, label %8

4: ; preds = %2
  %5 = zext nneg i32 %1 to i64
  br label %10

6: ; preds = %10
  %7 = phi i64 [ %16, %10 ]
  br label %8

8: ; preds = %6, %2
  %9 = phi i64 [ 0, %2 ], [ %7, %6 ]
  ret i64 %9

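; Scalar reduction loop: each iteration sign-extends a loaded i32 and adds it
; into the i64 accumulator %12; this is the loop the vectorizer rewrites into
; the <4 x i64> add reductions checked above.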
10: ; preds = %4, %10
  %11 = phi i64 [ 0, %4 ], [ %17, %10 ]
  %12 = phi i64 [ 0, %4 ], [ %16, %10 ]
  %13 = getelementptr inbounds nuw i32, ptr %0, i64 %11
  %14 = load i32, ptr %13, align 4
  %15 = sext i32 %14 to i64
  %16 = add i64 %12, %15
  %17 = add nuw nsw i64 %11, 1
  %18 = icmp eq i64 %17, %5
  br i1 %18, label %6, label %10
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[PROF3]] = !{!"branch_weights", i32 4, i32 12}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
;.