
Conversation

@david-arm
Contributor

In this patch I've made two changes:

  1. The aarch64-reassociate-accumulators-sve.ll test wasn't representative of loop-vectorised code: the pointers were incremented by a fixed value, so the access pattern was only correct when vscale=1. I've changed the loop to increment the pointer by the appropriate scalable amount (see the before/after excerpt below this list).
  2. I've regenerated the CHECK lines for both the NEON and SVE tests using filters to make the tests more maintainable.
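For reference, here is the heart of change 1, excerpted from the diff below (the comments are mine, not part of the patch):

  ; Before: fixed stride in i32 elements, only correct when vscale=1
  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
  %next_i = add i32 %i, 4
  %cmp = icmp slt i32 %next_i, 64

  ; After: the GEP element type is the whole scalable vector, so each step
  ; advances the pointer by vscale x 16 bytes and %i simply counts iterations.
  ; The trip count is unchanged: 16 iterations either way.
  %ptr1_i = getelementptr <vscale x 4 x i32>, ptr %ptr1, i32 %i
  %next_i = add i32 %i, 1
  %cmp = icmp slt i32 %next_i, 16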

@llvmbot
Member

llvmbot commented Jun 3, 2025

@llvm/pr-subscribers-backend-aarch64

Author: David Sherwood (david-arm)

Changes

In this patch I've made two changes:

  1. The aarch64-reassociate-accumulators-sve.ll test wasn't representative of loop-vectorised code: the pointers were incremented by a fixed value, so the access pattern was only correct when vscale=1. I've changed the loop to increment the pointer by the appropriate scalable amount.
  2. I've regenerated the CHECK lines for both the NEON and SVE tests using filters to make the tests more maintainable (the regeneration command is sketched below).
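The filters from change 2 are recorded in the NOTE line of the regenerated test (visible at the top of the diff below). A sketch of the corresponding invocation, assuming it is run from the root of an llvm-project checkout:

  llvm/utils/update_llc_test_checks.py --filter "aba" --filter "abd" --filter "add" --version 5 \
    llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll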

Patch is 45.32 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/142625.diff

2 Files Affected:

  • (modified) llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll (+345-170)
  • (modified) llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll (+183-182)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll
index 0d4c053551011..5547567092903 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll
@@ -1,29 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter "aba" --filter "abd" --filter "add" --version 5
 ; RUN: opt -passes=loop-unroll %s -o - | llc -O3 - -mtriple=aarch64-unknown-unknown -mcpu=neoverse-v2 -o - | FileCheck %s
 
 define i64 @sabalb_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: sabalb_i32_to_i64_accumulation
+; CHECK-LABEL: sabalb_i32_to_i64_accumulation:
+; CHECK:    sabdlb z1.d, z1.s, z2.s
+; CHECK:    sabalb z0.d, z3.s, z4.s
+; CHECK:    sabalb z1.d, z4.s, z2.s
+; CHECK:    sabdlb z2.d, z3.s, z2.s
+; CHECK:    sabalb z2.d, z4.s, z3.s
+; CHECK:    sabalb z0.d, z3.s, z4.s
+; CHECK:    sabalb z1.d, z3.s, z4.s
+; CHECK:    sabalb z2.d, z3.s, z4.s
+; CHECK:    sabalb z0.d, z3.s, z4.s
+; CHECK:    sabalb z1.d, z3.s, z4.s
+; CHECK:    sabalb z2.d, z3.s, z4.s
+; CHECK:    sabalb z0.d, z3.s, z4.s
+; CHECK:    sabalb z1.d, z3.s, z4.s
+; CHECK:    sabalb z2.d, z3.s, z4.s
+; CHECK:    sabalb z0.d, z3.s, z4.s
+; CHECK:    add z0.d, z2.d, z0.d
+; CHECK:    sabalb z1.d, z3.s, z4.s
+; CHECK:    add z0.d, z0.d, z1.d
+; CHECK:    uaddv d0, p0, z0.d
 entry:
   br label %loop
 loop:
-; CHECK: sabdlb
-; CHECK: sabalb z0.d
-; CHECK: sabalb z1.d
-; CHECK: sabalb z2.d
-; CHECK: add	z0.d, z2.d, z0.d
-; CHECK: add	z0.d, z0.d, z1.d
-; CHECK: uaddv	d0, p0, z0.d
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 4 x i32>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 4 x i32>, ptr %ptr2, i32 %i
   %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
   %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64> %acc_phi,
                                                                        <vscale x 4 x i32> %a,
                                                                        <vscale x 4 x i32> %b)
-  
-  %next_i = add i32 %i, 4
-  %cmp = icmp slt i32 %next_i, 64
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
@@ -34,29 +47,41 @@ declare <vscale x  2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64>,
 declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
 
 define i32 @sabalb_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: sabalb_i16_to_i32_accumulation
+; CHECK-LABEL: sabalb_i16_to_i32_accumulation:
+; CHECK:    sabdlb z1.s, z1.h, z2.h
+; CHECK:    sabalb z0.s, z3.h, z4.h
+; CHECK:    sabalb z1.s, z4.h, z2.h
+; CHECK:    sabdlb z2.s, z3.h, z2.h
+; CHECK:    sabalb z2.s, z4.h, z3.h
+; CHECK:    sabalb z0.s, z3.h, z4.h
+; CHECK:    sabalb z1.s, z3.h, z4.h
+; CHECK:    sabalb z2.s, z3.h, z4.h
+; CHECK:    sabalb z0.s, z3.h, z4.h
+; CHECK:    sabalb z1.s, z3.h, z4.h
+; CHECK:    sabalb z2.s, z3.h, z4.h
+; CHECK:    sabalb z0.s, z3.h, z4.h
+; CHECK:    sabalb z1.s, z3.h, z4.h
+; CHECK:    sabalb z2.s, z3.h, z4.h
+; CHECK:    sabalb z0.s, z3.h, z4.h
+; CHECK:    add z0.s, z2.s, z0.s
+; CHECK:    sabalb z1.s, z3.h, z4.h
+; CHECK:    add z0.s, z0.s, z1.s
+; CHECK:    uaddv d0, p0, z0.s
 entry:
   br label %loop
 loop:
-; CHECK: sabdlb
-; CHECK: sabalb z0.s
-; CHECK: sabalb z1.s
-; CHECK: sabalb z2.s
-; CHECK: add	z0.s, z2.s, z0.s
-; CHECK: add	z0.s, z0.s, z1.s
-; CHECK: uaddv	d0, p0, z0.s
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 8 x i16>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 8 x i16>, ptr %ptr2, i32 %i
   %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
   %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %acc_phi,
                                                                        <vscale x 8 x i16> %a,
                                                                        <vscale x 8 x i16> %b)
-  
-  %next_i = add i32 %i, 8
-  %cmp = icmp slt i32 %next_i, 128
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
@@ -67,29 +92,41 @@ declare <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32>,
 declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
 
 define i16 @sabalb_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: sabalb_i8_to_i16_accumulation
+; CHECK-LABEL: sabalb_i8_to_i16_accumulation:
+; CHECK:    sabdlb z1.h, z1.b, z2.b
+; CHECK:    sabalb z0.h, z3.b, z4.b
+; CHECK:    sabalb z1.h, z4.b, z2.b
+; CHECK:    sabdlb z2.h, z3.b, z2.b
+; CHECK:    sabalb z2.h, z4.b, z3.b
+; CHECK:    sabalb z0.h, z3.b, z4.b
+; CHECK:    sabalb z1.h, z3.b, z4.b
+; CHECK:    sabalb z2.h, z3.b, z4.b
+; CHECK:    sabalb z0.h, z3.b, z4.b
+; CHECK:    sabalb z1.h, z3.b, z4.b
+; CHECK:    sabalb z2.h, z3.b, z4.b
+; CHECK:    sabalb z0.h, z3.b, z4.b
+; CHECK:    sabalb z1.h, z3.b, z4.b
+; CHECK:    sabalb z2.h, z3.b, z4.b
+; CHECK:    sabalb z0.h, z3.b, z4.b
+; CHECK:    add z0.h, z2.h, z0.h
+; CHECK:    sabalb z1.h, z3.b, z4.b
+; CHECK:    add z0.h, z0.h, z1.h
+; CHECK:    uaddv d0, p0, z0.h
 entry:
   br label %loop
 loop:
-; CHECK: sabdlb
-; CHECK: sabalb z0.h
-; CHECK: sabalb z1.h
-; CHECK: sabalb z2.h
-; CHECK: add	z0.h, z2.h, z0.h
-; CHECK: add	z0.h, z0.h, z1.h
-; CHECK: uaddv	d0, p0, z0.h
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 16 x i8>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 16 x i8>, ptr %ptr2, i32 %i
   %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
   %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %acc_phi,
                                                                        <vscale x 16 x i8> %a,
                                                                        <vscale x 16 x i8> %b)
-  
-  %next_i = add i32 %i, 16
-  %cmp = icmp slt i32 %next_i, 256
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
@@ -100,29 +137,41 @@ declare <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16>,
 declare i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16>)
 
 define i64 @sabalt_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: sabalt_i32_to_i64_accumulation
+; CHECK-LABEL: sabalt_i32_to_i64_accumulation:
+; CHECK:    sabdlt z1.d, z1.s, z2.s
+; CHECK:    sabalt z0.d, z3.s, z4.s
+; CHECK:    sabalt z1.d, z4.s, z2.s
+; CHECK:    sabdlt z2.d, z3.s, z2.s
+; CHECK:    sabalt z2.d, z4.s, z3.s
+; CHECK:    sabalt z0.d, z3.s, z4.s
+; CHECK:    sabalt z1.d, z3.s, z4.s
+; CHECK:    sabalt z2.d, z3.s, z4.s
+; CHECK:    sabalt z0.d, z3.s, z4.s
+; CHECK:    sabalt z1.d, z3.s, z4.s
+; CHECK:    sabalt z2.d, z3.s, z4.s
+; CHECK:    sabalt z0.d, z3.s, z4.s
+; CHECK:    sabalt z1.d, z3.s, z4.s
+; CHECK:    sabalt z2.d, z3.s, z4.s
+; CHECK:    sabalt z0.d, z3.s, z4.s
+; CHECK:    add z0.d, z2.d, z0.d
+; CHECK:    sabalt z1.d, z3.s, z4.s
+; CHECK:    add z0.d, z0.d, z1.d
+; CHECK:    uaddv d0, p0, z0.d
 entry:
   br label %loop
 loop:
-; CHECK: sabdlt
-; CHECK: sabalt z0.d
-; CHECK: sabalt z1.d
-; CHECK: sabalt z2.d
-; CHECK: add	z0.d, z2.d, z0.d
-; CHECK: add	z0.d, z0.d, z1.d
-; CHECK: uaddv	d0, p0, z0.d
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 4 x i32>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 4 x i32>, ptr %ptr2, i32 %i
   %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
   %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64> %acc_phi,
                                                                        <vscale x 4 x i32> %a,
                                                                        <vscale x 4 x i32> %b)
-  
-  %next_i = add i32 %i, 4
-  %cmp = icmp slt i32 %next_i, 64
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
@@ -132,29 +181,41 @@ exit:
 declare <vscale x  2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 
 define i32 @sabalt_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: sabalt_i16_to_i32_accumulation
+; CHECK-LABEL: sabalt_i16_to_i32_accumulation:
+; CHECK:    sabdlt z1.s, z1.h, z2.h
+; CHECK:    sabalt z0.s, z3.h, z4.h
+; CHECK:    sabalt z1.s, z4.h, z2.h
+; CHECK:    sabdlt z2.s, z3.h, z2.h
+; CHECK:    sabalt z2.s, z4.h, z3.h
+; CHECK:    sabalt z0.s, z3.h, z4.h
+; CHECK:    sabalt z1.s, z3.h, z4.h
+; CHECK:    sabalt z2.s, z3.h, z4.h
+; CHECK:    sabalt z0.s, z3.h, z4.h
+; CHECK:    sabalt z1.s, z3.h, z4.h
+; CHECK:    sabalt z2.s, z3.h, z4.h
+; CHECK:    sabalt z0.s, z3.h, z4.h
+; CHECK:    sabalt z1.s, z3.h, z4.h
+; CHECK:    sabalt z2.s, z3.h, z4.h
+; CHECK:    sabalt z0.s, z3.h, z4.h
+; CHECK:    add z0.s, z2.s, z0.s
+; CHECK:    sabalt z1.s, z3.h, z4.h
+; CHECK:    add z0.s, z0.s, z1.s
+; CHECK:    uaddv d0, p0, z0.s
 entry:
   br label %loop
 loop:
-; CHECK: sabdlt
-; CHECK: sabalt z0.s
-; CHECK: sabalt z1.s
-; CHECK: sabalt z2.s
-; CHECK: add	z0.s, z2.s, z0.s
-; CHECK: add	z0.s, z0.s, z1.s
-; CHECK: uaddv	d0, p0, z0.s
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 8 x i16>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 8 x i16>, ptr %ptr2, i32 %i
   %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
   %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32> %acc_phi,
                                                                        <vscale x 8 x i16> %a,
                                                                        <vscale x 8 x i16> %b)
-  
-  %next_i = add i32 %i, 8
-  %cmp = icmp slt i32 %next_i, 128
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
@@ -164,29 +225,41 @@ exit:
 declare <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 
 define i16 @sabalt_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: sabalt_i8_to_i16_accumulation
+; CHECK-LABEL: sabalt_i8_to_i16_accumulation:
+; CHECK:    sabdlt z1.h, z1.b, z2.b
+; CHECK:    sabalt z0.h, z3.b, z4.b
+; CHECK:    sabalt z1.h, z4.b, z2.b
+; CHECK:    sabdlt z2.h, z3.b, z2.b
+; CHECK:    sabalt z2.h, z4.b, z3.b
+; CHECK:    sabalt z0.h, z3.b, z4.b
+; CHECK:    sabalt z1.h, z3.b, z4.b
+; CHECK:    sabalt z2.h, z3.b, z4.b
+; CHECK:    sabalt z0.h, z3.b, z4.b
+; CHECK:    sabalt z1.h, z3.b, z4.b
+; CHECK:    sabalt z2.h, z3.b, z4.b
+; CHECK:    sabalt z0.h, z3.b, z4.b
+; CHECK:    sabalt z1.h, z3.b, z4.b
+; CHECK:    sabalt z2.h, z3.b, z4.b
+; CHECK:    sabalt z0.h, z3.b, z4.b
+; CHECK:    add z0.h, z2.h, z0.h
+; CHECK:    sabalt z1.h, z3.b, z4.b
+; CHECK:    add z0.h, z0.h, z1.h
+; CHECK:    uaddv d0, p0, z0.h
 entry:
   br label %loop
 loop:
-; CHECK: sabdlt
-; CHECK: sabalt z0.h
-; CHECK: sabalt z1.h
-; CHECK: sabalt z2.h
-; CHECK: add	z0.h, z2.h, z0.h
-; CHECK: add	z0.h, z0.h, z1.h
-; CHECK: uaddv	d0, p0, z0.h
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 16 x i8>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 16 x i8>, ptr %ptr2, i32 %i
   %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
   %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16> %acc_phi,
                                                                        <vscale x 16 x i8> %a,
                                                                        <vscale x 16 x i8> %b)
-  
-  %next_i = add i32 %i, 16
-  %cmp = icmp slt i32 %next_i, 256
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
@@ -196,29 +269,41 @@ exit:
 declare <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 
 define i64 @uabalb_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: uabalb_i32_to_i64_accumulation
+; CHECK-LABEL: uabalb_i32_to_i64_accumulation:
+; CHECK:    uabdlb z1.d, z1.s, z2.s
+; CHECK:    uabalb z0.d, z3.s, z4.s
+; CHECK:    uabalb z1.d, z4.s, z2.s
+; CHECK:    uabdlb z2.d, z3.s, z2.s
+; CHECK:    uabalb z2.d, z4.s, z3.s
+; CHECK:    uabalb z0.d, z3.s, z4.s
+; CHECK:    uabalb z1.d, z3.s, z4.s
+; CHECK:    uabalb z2.d, z3.s, z4.s
+; CHECK:    uabalb z0.d, z3.s, z4.s
+; CHECK:    uabalb z1.d, z3.s, z4.s
+; CHECK:    uabalb z2.d, z3.s, z4.s
+; CHECK:    uabalb z0.d, z3.s, z4.s
+; CHECK:    uabalb z1.d, z3.s, z4.s
+; CHECK:    uabalb z2.d, z3.s, z4.s
+; CHECK:    uabalb z0.d, z3.s, z4.s
+; CHECK:    add z0.d, z2.d, z0.d
+; CHECK:    uabalb z1.d, z3.s, z4.s
+; CHECK:    add z0.d, z0.d, z1.d
+; CHECK:    uaddv d0, p0, z0.d
 entry:
   br label %loop
 loop:
-; CHECK: uabdlb
-; CHECK: uabalb z0.d
-; CHECK: uabalb z1.d
-; CHECK: uabalb z2.d
-; CHECK: add	z0.d, z2.d, z0.d
-; CHECK: add	z0.d, z0.d, z1.d
-; CHECK: uaddv	d0, p0, z0.d
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 4 x i32>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 4 x i32>, ptr %ptr2, i32 %i
   %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
   %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64> %acc_phi,
                                                                        <vscale x 4 x i32> %a,
                                                                        <vscale x 4 x i32> %b)
-  
-  %next_i = add i32 %i, 4
-  %cmp = icmp slt i32 %next_i, 64
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
@@ -228,29 +313,41 @@ exit:
 declare <vscale x  2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 
 define i32 @uabalb_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: uabalb_i16_to_i32_accumulation
+; CHECK-LABEL: uabalb_i16_to_i32_accumulation:
+; CHECK:    uabdlb z1.s, z1.h, z2.h
+; CHECK:    uabalb z0.s, z3.h, z4.h
+; CHECK:    uabalb z1.s, z4.h, z2.h
+; CHECK:    uabdlb z2.s, z3.h, z2.h
+; CHECK:    uabalb z2.s, z4.h, z3.h
+; CHECK:    uabalb z0.s, z3.h, z4.h
+; CHECK:    uabalb z1.s, z3.h, z4.h
+; CHECK:    uabalb z2.s, z3.h, z4.h
+; CHECK:    uabalb z0.s, z3.h, z4.h
+; CHECK:    uabalb z1.s, z3.h, z4.h
+; CHECK:    uabalb z2.s, z3.h, z4.h
+; CHECK:    uabalb z0.s, z3.h, z4.h
+; CHECK:    uabalb z1.s, z3.h, z4.h
+; CHECK:    uabalb z2.s, z3.h, z4.h
+; CHECK:    uabalb z0.s, z3.h, z4.h
+; CHECK:    add z0.s, z2.s, z0.s
+; CHECK:    uabalb z1.s, z3.h, z4.h
+; CHECK:    add z0.s, z0.s, z1.s
+; CHECK:    uaddv d0, p0, z0.s
 entry:
   br label %loop
 loop:
-; CHECK: uabdlb
-; CHECK: uabalb z0.s
-; CHECK: uabalb z1.s
-; CHECK: uabalb z2.s
-; CHECK: add	z0.s, z2.s, z0.s
-; CHECK: add	z0.s, z0.s, z1.s
-; CHECK: uaddv	d0, p0, z0.s
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 8 x i16>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 8 x i16>, ptr %ptr2, i32 %i
   %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
   %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32> %acc_phi,
                                                                        <vscale x 8 x i16> %a,
                                                                        <vscale x 8 x i16> %b)
-  
-  %next_i = add i32 %i, 8
-  %cmp = icmp slt i32 %next_i, 128
+
+  %next_i = add i32 %i, 1
+  %cmp = icmp slt i32 %next_i, 16
   br i1 %cmp, label %loop, label %exit
 exit:
   %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
@@ -260,29 +357,41 @@ exit:
 declare <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 
 define i16 @uabalb_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
-  ; CHECK-LABEL: uabalb_i8_to_i16_accumulation
+; CHECK-LABEL: uabalb_i8_to_i16_accumulation:
+; CHECK:    uabdlb z1.h, z1.b, z2.b
+; CHECK:    uabalb z0.h, z3.b, z4.b
+; CHECK:    uabalb z1.h, z4.b, z2.b
+; CHECK:    uabdlb z2.h, z3.b, z2.b
+; CHECK:    uabalb z2.h, z4.b, z3.b
+; CHECK:    uabalb z0.h, z3.b, z4.b
+; CHECK:    uabalb z1.h, z3.b, z4.b
+; CHECK:    uabalb z2.h, z3.b, z4.b
+; CHECK:    uabalb z0.h, z3.b, z4.b
+; CHECK:    uabalb z1.h, z3.b, z4.b
+; CHECK:    uabalb z2.h, z3.b, z4.b
+; CHECK:    uabalb z0.h, z3.b, z4.b
+; CHECK:    uabalb z1.h, z3.b, z4.b
+; CHECK:    uabalb z2.h, z3.b, z4.b
+; CHECK:    uabalb z0.h, z3.b, z4.b
+; CHECK:    add z0.h, z2.h, z0.h
+; CHECK:    uabalb z1.h, z3.b, z4.b
+; CHECK:    add z0.h, z0.h, z1.h
+; CHECK:    uaddv d0, p0, z0.h
 entry:
   br label %loop
 loop:
-; CHECK: uabdlb
-; CHECK: uabalb z0.h
-; CHECK: uabalb z1.h
-; CHECK: uabalb z2.h
-; CHECK: add	z0.h, z2.h, z0.h
-; CHECK: add	z0.h, z0.h, z1.h
-; CHECK: uaddv	d0, p0, z0.h
   %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
   %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
-  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
-  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %ptr1_i = getelementptr <vscale x 16 x i8>, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr <vscale x 16 x i8>, ptr %ptr2, i32 %i
   %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
   %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
   %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %acc_phi,
                                                                        <vscale x 16 x i8> %a,
                                                                        <vscale x 16 x i8> %b)
-  
-  ...
[truncated]

@david-arm david-arm merged commit 11a9dad into llvm:main Jun 4, 2025
11 of 13 checks passed
