2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21814,7 +21814,7 @@ SDValue tryLowerPartialReductionToWideAdd(SDNode *N,
              Intrinsic::experimental_vector_partial_reduce_add &&
          "Expected a partial reduction node");
 
-  if (!Subtarget->isSVEorStreamingSVEAvailable())
+  if (!Subtarget->hasSVE2() && !Subtarget->isStreamingSVEAvailable())
     return SDValue();
 
   SDLoc DL(N);
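
For context: the lowering behind this guard emits the SVE2 wide add instructions (saddwb/saddwt and uaddwb/uaddwt), so gating on plain SVE availability was too permissive; the new condition requires SVE2 proper or streaming SVE. A minimal sketch of the corrected gate as a standalone predicate; the helper name is illustrative, while the two Subtarget queries are the ones used in the patch:

// Hedged sketch, not a helper from the LLVM tree: the combine may only
// fire when the SVE2 wide adds can actually be selected, i.e. with the
// +sve2 target feature or in streaming SVE mode.
static bool canSelectSVE2WideAdd(const AArch64Subtarget &ST) {
  return ST.hasSVE2() || ST.isStreamingSVEAvailable();
}
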
111 changes: 80 additions & 31 deletions llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
@@ -1,72 +1,121 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE2
+; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE
 
 define <vscale x 2 x i64> @signed_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input){
-; CHECK-LABEL: signed_wide_add_nxv4i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: saddwb z0.d, z0.d, z1.s
-; CHECK-NEXT: saddwt z0.d, z0.d, z1.s
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: signed_wide_add_nxv4i32:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: saddwb z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: saddwt z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: signed_wide_add_nxv4i32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sunpklo z2.d, z1.s
+; CHECK-SVE-NEXT: sunpkhi z1.d, z1.s
+; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d
+; CHECK-SVE-NEXT: add z0.d, z1.d, z0.d
+; CHECK-SVE-NEXT: ret
 entry:
   %input.wide = sext <vscale x 4 x i32> %input to <vscale x 4 x i64>
   %partial.reduce = tail call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide)
   ret <vscale x 2 x i64> %partial.reduce
 }
 
 define <vscale x 2 x i64> @unsigned_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input){
-; CHECK-LABEL: unsigned_wide_add_nxv4i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uaddwb z0.d, z0.d, z1.s
-; CHECK-NEXT: uaddwt z0.d, z0.d, z1.s
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: unsigned_wide_add_nxv4i32:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: uaddwb z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: uaddwt z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: unsigned_wide_add_nxv4i32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: uunpklo z2.d, z1.s
+; CHECK-SVE-NEXT: uunpkhi z1.d, z1.s
+; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d
+; CHECK-SVE-NEXT: add z0.d, z1.d, z0.d
+; CHECK-SVE-NEXT: ret
 entry:
   %input.wide = zext <vscale x 4 x i32> %input to <vscale x 4 x i64>
   %partial.reduce = tail call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide)
   ret <vscale x 2 x i64> %partial.reduce
 }
 
 define <vscale x 4 x i32> @signed_wide_add_nxv8i16(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %input){
-; CHECK-LABEL: signed_wide_add_nxv8i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: saddwb z0.s, z0.s, z1.h
-; CHECK-NEXT: saddwt z0.s, z0.s, z1.h
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: signed_wide_add_nxv8i16:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: saddwb z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: saddwt z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: signed_wide_add_nxv8i16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sunpklo z2.s, z1.h
+; CHECK-SVE-NEXT: sunpkhi z1.s, z1.h
+; CHECK-SVE-NEXT: add z0.s, z0.s, z2.s
+; CHECK-SVE-NEXT: add z0.s, z1.s, z0.s
+; CHECK-SVE-NEXT: ret
 entry:
   %input.wide = sext <vscale x 8 x i16> %input to <vscale x 8 x i32>
   %partial.reduce = tail call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv8i32(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %input.wide)
   ret <vscale x 4 x i32> %partial.reduce
 }
 
 define <vscale x 4 x i32> @unsigned_wide_add_nxv8i16(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %input){
-; CHECK-LABEL: unsigned_wide_add_nxv8i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uaddwb z0.s, z0.s, z1.h
-; CHECK-NEXT: uaddwt z0.s, z0.s, z1.h
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: unsigned_wide_add_nxv8i16:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: uaddwb z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: uaddwt z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: unsigned_wide_add_nxv8i16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: uunpklo z2.s, z1.h
+; CHECK-SVE-NEXT: uunpkhi z1.s, z1.h
+; CHECK-SVE-NEXT: add z0.s, z0.s, z2.s
+; CHECK-SVE-NEXT: add z0.s, z1.s, z0.s
+; CHECK-SVE-NEXT: ret
 entry:
   %input.wide = zext <vscale x 8 x i16> %input to <vscale x 8 x i32>
   %partial.reduce = tail call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv8i32(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %input.wide)
   ret <vscale x 4 x i32> %partial.reduce
 }
 
 define <vscale x 8 x i16> @signed_wide_add_nxv16i8(<vscale x 8 x i16> %acc, <vscale x 16 x i8> %input){
-; CHECK-LABEL: signed_wide_add_nxv16i8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: saddwb z0.h, z0.h, z1.b
-; CHECK-NEXT: saddwt z0.h, z0.h, z1.b
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: signed_wide_add_nxv16i8:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: saddwb z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: saddwt z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: signed_wide_add_nxv16i8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sunpklo z2.h, z1.b
+; CHECK-SVE-NEXT: sunpkhi z1.h, z1.b
+; CHECK-SVE-NEXT: add z0.h, z0.h, z2.h
+; CHECK-SVE-NEXT: add z0.h, z1.h, z0.h
+; CHECK-SVE-NEXT: ret
 entry:
   %input.wide = sext <vscale x 16 x i8> %input to <vscale x 16 x i16>
   %partial.reduce = tail call <vscale x 8 x i16> @llvm.experimental.vector.partial.reduce.add.nxv8i16.nxv16i16(<vscale x 8 x i16> %acc, <vscale x 16 x i16> %input.wide)
   ret <vscale x 8 x i16> %partial.reduce
 }
 
 define <vscale x 8 x i16> @unsigned_wide_add_nxv16i8(<vscale x 8 x i16> %acc, <vscale x 16 x i8> %input){
-; CHECK-LABEL: unsigned_wide_add_nxv16i8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uaddwb z0.h, z0.h, z1.b
-; CHECK-NEXT: uaddwt z0.h, z0.h, z1.b
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: unsigned_wide_add_nxv16i8:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: uaddwb z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: uaddwt z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: unsigned_wide_add_nxv16i8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: uunpklo z2.h, z1.b
+; CHECK-SVE-NEXT: uunpkhi z1.h, z1.b
+; CHECK-SVE-NEXT: add z0.h, z0.h, z2.h
+; CHECK-SVE-NEXT: add z0.h, z1.h, z0.h
+; CHECK-SVE-NEXT: ret
 entry:
   %input.wide = zext <vscale x 16 x i8> %input to <vscale x 16 x i16>
   %partial.reduce = tail call <vscale x 8 x i16> @llvm.experimental.vector.partial.reduce.add.nxv8i16.nxv16i16(<vscale x 8 x i16> %acc, <vscale x 16 x i16> %input.wide)
   ret <vscale x 8 x i16> %partial.reduce
 }
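
Why both sequences are valid for this intrinsic: llvm.experimental.vector.partial.reduce.add only promises that every input element is accumulated into some lane of the result, not into which lane. The SVE2 form folds even/odd input pairs (saddwb adds the bottom elements, saddwt the top), while the SVE fallback widens the low and high halves (sunpklo/sunpkhi) and adds each half to the accumulator. A scalar C++ model of the signed i32-to-i64 case, written from the instruction semantics above (the function names are illustrative, not LLVM APIs):

#include <cassert>
#include <cstdint>
#include <numeric>
#include <vector>

// Scalar model of saddwb/saddwt: lane i of the accumulator receives the
// sign-extended even (bottom) and odd (top) input elements 2*i and 2*i+1.
std::vector<int64_t> wideAddSVE2(std::vector<int64_t> acc,
                                 const std::vector<int32_t> &in) {
  for (size_t i = 0; i < acc.size(); ++i)
    acc[i] += int64_t{in[2 * i]} + int64_t{in[2 * i + 1]};
  return acc;
}

// Scalar model of the SVE fallback: sunpklo/sunpkhi widen the low and high
// halves of the input, and each half is added to the accumulator, so lane
// i receives input elements i and i + half.
std::vector<int64_t> wideAddSVE(std::vector<int64_t> acc,
                                const std::vector<int32_t> &in) {
  size_t half = acc.size();
  for (size_t i = 0; i < half; ++i)
    acc[i] += int64_t{in[i]} + int64_t{in[half + i]};
  return acc;
}

int main() {
  std::vector<int64_t> acc = {100, 200};
  std::vector<int32_t> in = {1, -2, 3, -4};
  auto a = wideAddSVE2(acc, in); // lanes: {100 + 1 - 2, 200 + 3 - 4}
  auto b = wideAddSVE(acc, in);  // lanes: {100 + 1 + 3, 200 - 2 - 4}
  // The per-lane groupings differ, but both fold every input element into
  // exactly one lane, which is all the intrinsic requires: a final full
  // reduction over the lanes yields the same total either way.
  assert(std::accumulate(a.begin(), a.end(), int64_t{0}) ==
         std::accumulate(b.begin(), b.end(), int64_t{0}));
}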