99 changes: 99 additions & 0 deletions llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-add.ll
Contributor

We already have a lot of partial-reduce-* files. It would be good to add to an existing file rather than create a new one.

Contributor Author

In all the other files, additional options or command-line arguments are used that are irrelevant for this test. Hence, I decided to go with a separate file.

Contributor

The file partial-reduce.ll already contains very similar tests; see sext_add_reduc_i8_i32, for example. The test you're adding is just another variant of that, which is why it feels like the more appropriate place.
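
For context, that existing test reduces sign-extended i8 values into an i32 accumulator, so its kernel has roughly this shape (a sketch, not the verbatim test):

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %acc = phi i32 [ 0, %entry ], [ %add, %loop ]
  %gep = getelementptr i8, ptr %a, i64 %iv
  %load = load i8, ptr %gep
  %sext = sext i8 %load to i32
  %add = add i32 %acc, %sext
  %iv.next = add i64 %iv, 1
  %cmp = icmp ne i64 %iv.next, %n
  br i1 %cmp, label %loop, label %exit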

Contributor

I suppose what you want is a neoverse-v2-specific RUN line, which you could add to partial-reduce.ll so that we get better test coverage for the other variants too?
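
Such a RUN line would look much like the neoverse-v2 one in the diff below; as a sketch (the check prefix is just illustrative):

  ; RUN: opt < %s -p loop-vectorize -mtriple=aarch64 -mcpu=neoverse-v2 -S -o - | FileCheck %s --check-prefix NEOVERSE-V2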

Contributor Author

@sushgokh Oct 13, 2025

Done. Instead of adding a new RUN line with the Grace CPU, I used the feature in the same way the other subtests already do.
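
A minimal sketch of what that could look like, assuming the convention is a per-function "target-cpu" attribute rather than an extra RUN line (the attribute group number here is illustrative):

define i64 @partial_reduction_with_no_second_input(ptr %arr, i64 %N) #1 {
entry:
  br label %loop

loop:                                           ; sums sign-extended i32 loads
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %acc = phi i64 [ 0, %entry ], [ %add, %loop ]
  %gep = getelementptr inbounds i32, ptr %arr, i64 %iv
  %load = load i32, ptr %gep
  %sext = sext i32 %load to i64
  %add = add i64 %acc, %sext
  %iv.next = add i64 %iv, 1
  %cmp = icmp ult i64 %iv.next, %N
  br i1 %cmp, label %loop, label %exit

exit:
  ret i64 %add
}

attributes #1 = { "target-cpu"="neoverse-v2" }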

@@ -0,0 +1,99 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^middle" --version 6
; RUN: opt < %s -p loop-vectorize -mtriple=aarch64 -S -o - | FileCheck %s
; RUN: opt < %s -p loop-vectorize -mtriple=aarch64 -mcpu=neoverse-v2 -S -o - | FileCheck %s --check-prefix NEOVERSE-V2

define i64 @partial_reduction_with_no_second_input(ptr %arr, i64 %N)
; CHECK-LABEL: define i64 @partial_reduction_with_no_second_input(
; CHECK-SAME: ptr [[ARR:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 8
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[WIDE_LOAD2]] to <4 x i64>
; CHECK-NEXT: [[TMP4]] = add <4 x i64> [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP5]] = add <4 x i64> [[VEC_PHI1]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
;
; NEOVERSE-V2-LABEL: define i64 @partial_reduction_with_no_second_input(
; NEOVERSE-V2-SAME: ptr [[ARR:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; NEOVERSE-V2-NEXT: [[ITER_CHECK:.*:]]
; NEOVERSE-V2-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
; NEOVERSE-V2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 2
; NEOVERSE-V2-NEXT: br i1 [[MIN_ITERS_CHECK]], [[VEC_EPILOG_SCALAR_PH:label %.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
; NEOVERSE-V2: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
; NEOVERSE-V2-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[UMAX]], 8
; NEOVERSE-V2-NEXT: br i1 [[MIN_ITERS_CHECK1]], [[VEC_EPILOG_PH:label %.*]], label %[[VECTOR_PH:.*]]
; NEOVERSE-V2: [[VECTOR_PH]]:
; NEOVERSE-V2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 8
; NEOVERSE-V2-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
; NEOVERSE-V2-NEXT: br label %[[VECTOR_BODY:.*]]
; NEOVERSE-V2: [[VECTOR_BODY]]:
; NEOVERSE-V2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NEOVERSE-V2-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
; NEOVERSE-V2-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
; NEOVERSE-V2-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP10:%.*]], %[[VECTOR_BODY]] ]
; NEOVERSE-V2-NEXT: [[VEC_PHI4:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
; NEOVERSE-V2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[INDEX]]
; NEOVERSE-V2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 2
; NEOVERSE-V2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4
; NEOVERSE-V2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 6
; NEOVERSE-V2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4
; NEOVERSE-V2-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x i32>, ptr [[TMP1]], align 4
; NEOVERSE-V2-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4
; NEOVERSE-V2-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4
; NEOVERSE-V2-NEXT: [[TMP4:%.*]] = sext <2 x i32> [[WIDE_LOAD]] to <2 x i64>
; NEOVERSE-V2-NEXT: [[TMP5:%.*]] = sext <2 x i32> [[WIDE_LOAD5]] to <2 x i64>
; NEOVERSE-V2-NEXT: [[TMP6:%.*]] = sext <2 x i32> [[WIDE_LOAD6]] to <2 x i64>
; NEOVERSE-V2-NEXT: [[TMP7:%.*]] = sext <2 x i32> [[WIDE_LOAD7]] to <2 x i64>
; NEOVERSE-V2-NEXT: [[TMP8]] = add <2 x i64> [[VEC_PHI]], [[TMP4]]
; NEOVERSE-V2-NEXT: [[TMP9]] = add <2 x i64> [[VEC_PHI2]], [[TMP5]]
; NEOVERSE-V2-NEXT: [[TMP10]] = add <2 x i64> [[VEC_PHI3]], [[TMP6]]
; NEOVERSE-V2-NEXT: [[TMP11]] = add <2 x i64> [[VEC_PHI4]], [[TMP7]]
; NEOVERSE-V2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NEOVERSE-V2-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NEOVERSE-V2-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NEOVERSE-V2: [[MIDDLE_BLOCK]]:
;
{
entry:
br label %loop

loop:
%1 = phi i64 [ 0, %entry ], [ %2, %loop ]
Contributor

nit: Could you give this a name such as %iv and %iv.next for the update?

Contributor Author

Since the induction variable wasn't the focus of the test, I didn't highlight it by giving it a name. Is there any convention I should follow for when to name the variables and when not to?

Contributor

Yeah, there is a convention; see test/Transforms/LoopVectorize/pr35743.ll for example. Typically we use %iv and %iv.next, e.g.:

loop:                                            ; preds = %loop, %entry
  %accum.phi = phi i8 [ %c, %entry ], [ %accum.plus, %loop ]
  %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
  %accum.and = and i8 %accum.phi, 1
  %accum.plus = add nuw nsw i8 %accum.and, 3
  %iv.next = add nuw nsw i32 %iv, 1
  %cond = icmp ugt i32 %iv, 191
  br i1 %cond, label %exit, label %loop

Contributor Author

Done.

%acc = phi i64 [ 0, %entry ], [ %add, %loop ]
%gep = getelementptr inbounds i32, ptr %arr, i64 %1
%load = load i32, ptr %gep
%sext = sext i32 %load to i64
%add = add i64 %acc, %sext
%2 = add i64 %1, 1
%3 = icmp ult i64 %2, %N
br i1 %3, label %loop, label %exit

exit:
ret i64 %add
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
;.
; NEOVERSE-V2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; NEOVERSE-V2: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; NEOVERSE-V2: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
;.