From 52434bcc30300a535548b1cd76298606e17787af Mon Sep 17 00:00:00 2001 From: Kerry McLaughlin Date: Tue, 20 May 2025 15:32:36 +0000 Subject: [PATCH 1/5] [AArch64] Add custom lowering of nxv32i1 get.active.lane.mask nodes performActiveLaneMaskCombine already tries to combine a single get.active.lane.mask where the low and high halves of the result are extracted into a single whilelo which operates on a predicate pair. If the get.active.lane.mask node requires splitting, multiple nodes are created with saturating adds to increment the starting index. We cannot combine these into a single whilelo_x2 at this point unless we know the add will not overflow. This patch adds custom lowering for the node if the return type is nxv32i1, as this can be replaced with a whilelo_x2 using legal types. Anything wider than nxv32i1 will still require splitting first. --- .../Target/AArch64/AArch64ISelLowering.cpp | 28 ++++ llvm/lib/Target/AArch64/AArch64ISelLowering.h | 3 + .../AArch64/get-active-lane-mask-extract.ll | 151 +++++++++++++++++- 3 files changed, 181 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index f0a703be35207..4eb49b9fe025e 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1501,6 +1501,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::GET_ACTIVE_LANE_MASK, VT, Legal); } + setOperationAction(ISD::GET_ACTIVE_LANE_MASK, MVT::nxv32i1, Custom); + for (auto VT : {MVT::v16i8, MVT::v8i8, MVT::v4i16, MVT::v2i32}) setOperationAction(ISD::GET_ACTIVE_LANE_MASK, VT, Custom); } @@ -27328,6 +27330,29 @@ void AArch64TargetLowering::ReplaceExtractSubVectorResults( Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half)); } +void AArch64TargetLowering::ReplaceGetActiveLaneMaskResults( + SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { + if 
(!Subtarget->hasSVE2p1()) + return; + + SDLoc DL(N); + SDValue Idx = N->getOperand(0); + SDValue TC = N->getOperand(1); + if (Idx.getValueType() != MVT::i64) { + Idx = DAG.getZExtOrTrunc(Idx, DL, MVT::i64); + TC = DAG.getZExtOrTrunc(TC, DL, MVT::i64); + } + + SDValue ID = + DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo_x2, DL, MVT::i64); + EVT HalfVT = N->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext()); + auto WideMask = + DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, {HalfVT, HalfVT}, {ID, Idx, TC}); + + Results.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), + {WideMask.getValue(0), WideMask.getValue(1)})); +} + // Create an even/odd pair of X registers holding integer value V. static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { SDLoc dl(V.getNode()); @@ -27714,6 +27739,9 @@ void AArch64TargetLowering::ReplaceNodeResults( // CONCAT_VECTORS -- but delegate to common code for result type // legalisation return; + case ISD::GET_ACTIVE_LANE_MASK: + ReplaceGetActiveLaneMaskResults(N, Results, DAG); + return; case ISD::INTRINSIC_WO_CHAIN: { EVT VT = N->getValueType(0); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index b59526bf01888..4c6358034af02 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -1318,6 +1318,9 @@ class AArch64TargetLowering : public TargetLowering { void ReplaceExtractSubVectorResults(SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const; + void ReplaceGetActiveLaneMaskResults(SDNode *N, + SmallVectorImpl &Results, + SelectionDAG &DAG) const; bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override; diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll index 2d84a69f3144e..0b78dd963cbb0 100644 --- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll +++ 
b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll @@ -111,7 +111,7 @@ define void @test_partial_extract(i64 %i, i64 %n) #0 { ret void } -;; Negative test for when extracting a fixed-length vector. +; Negative test for when extracting a fixed-length vector. define void @test_fixed_extract(i64 %i, i64 %n) #0 { ; CHECK-SVE-LABEL: test_fixed_extract: ; CHECK-SVE: // %bb.0: @@ -151,6 +151,155 @@ define void @test_fixed_extract(i64 %i, i64 %n) #0 { ret void } +; Illegal Types + +define void @test_2x16bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) #0 { +; CHECK-SVE-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE: // %bb.0: +; CHECK-SVE-NEXT: rdvl x8, #1 +; CHECK-SVE-NEXT: adds w8, w0, w8 +; CHECK-SVE-NEXT: csinv w8, w8, wzr, lo +; CHECK-SVE-NEXT: whilelo p0.b, w0, w1 +; CHECK-SVE-NEXT: whilelo p1.b, w8, w1 +; CHECK-SVE-NEXT: b use +; +; CHECK-SVE2p1-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE2p1: // %bb.0: +; CHECK-SVE2p1-NEXT: mov w8, w1 +; CHECK-SVE2p1-NEXT: mov w9, w0 +; CHECK-SVE2p1-NEXT: whilelo { p0.b, p1.b }, x9, x8 +; CHECK-SVE2p1-NEXT: b use + %r = call @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n) + %v0 = call @llvm.vector.extract.nxv16i1.nxv32i1.i64( %r, i64 0) + %v1 = call @llvm.vector.extract.nxv16i1.nxv32i1.i64( %r, i64 16) + tail call void @use( %v0, %v1) + ret void +} + +define void @test_2x32bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) #0 { +; CHECK-SVE-LABEL: test_2x32bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE: // %bb.0: +; CHECK-SVE-NEXT: rdvl x8, #2 +; CHECK-SVE-NEXT: rdvl x9, #1 +; CHECK-SVE-NEXT: adds w8, w0, w8 +; CHECK-SVE-NEXT: csinv w8, w8, wzr, lo +; CHECK-SVE-NEXT: adds w10, w8, w9 +; CHECK-SVE-NEXT: csinv w10, w10, wzr, lo +; CHECK-SVE-NEXT: whilelo p3.b, w10, w1 +; CHECK-SVE-NEXT: adds w9, w0, w9 +; CHECK-SVE-NEXT: csinv w9, w9, wzr, lo +; CHECK-SVE-NEXT: whilelo p0.b, w0, w1 +; CHECK-SVE-NEXT: whilelo p1.b, w9, w1 +; CHECK-SVE-NEXT: whilelo 
p2.b, w8, w1 +; CHECK-SVE-NEXT: b use +; +; CHECK-SVE2p1-LABEL: test_2x32bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE2p1: // %bb.0: +; CHECK-SVE2p1-NEXT: rdvl x8, #2 +; CHECK-SVE2p1-NEXT: mov w9, w1 +; CHECK-SVE2p1-NEXT: mov w10, w0 +; CHECK-SVE2p1-NEXT: adds w8, w0, w8 +; CHECK-SVE2p1-NEXT: csinv w8, w8, wzr, lo +; CHECK-SVE2p1-NEXT: whilelo { p0.b, p1.b }, x10, x9 +; CHECK-SVE2p1-NEXT: whilelo { p2.b, p3.b }, x8, x9 +; CHECK-SVE2p1-NEXT: b use + %r = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 %i, i32 %n) + %v0 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 0) + %v1 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 16) + %v2 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 32) + %v3 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 48) + tail call void @use( %v0, %v1, %v2, %v3) + ret void +} + +define void @test_2x16bit_mask_with_32bit_index_and_trip_count_ext8(i32 %i, i32 %n) #0 { +; CHECK-SVE-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_ext8: +; CHECK-SVE: // %bb.0: +; CHECK-SVE-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-SVE-NEXT: rdvl x8, #1 +; CHECK-SVE-NEXT: adds w8, w0, w8 +; CHECK-SVE-NEXT: csinv w8, w8, wzr, lo +; CHECK-SVE-NEXT: whilelo p0.b, w0, w1 +; CHECK-SVE-NEXT: whilelo p4.b, w8, w1 +; CHECK-SVE-NEXT: punpklo p1.h, p0.b +; CHECK-SVE-NEXT: punpkhi p3.h, p0.b +; CHECK-SVE-NEXT: punpklo p0.h, p1.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: punpklo p2.h, p3.b +; CHECK-SVE-NEXT: punpkhi p3.h, p3.b +; CHECK-SVE-NEXT: bl use +; CHECK-SVE-NEXT: punpklo p1.h, p4.b +; CHECK-SVE-NEXT: punpkhi p3.h, p4.b +; CHECK-SVE-NEXT: punpklo p0.h, p1.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: punpklo p2.h, p3.b +; CHECK-SVE-NEXT: punpkhi p3.h, p3.b +; CHECK-SVE-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-SVE-NEXT: b use +; +; CHECK-SVE2p1-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_ext8: +; CHECK-SVE2p1: // %bb.0: +; CHECK-SVE2p1-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SVE2p1-NEXT: mov w8, w1 +; CHECK-SVE2p1-NEXT: mov w9, w0 +; CHECK-SVE2p1-NEXT: whilelo { p4.b, p5.b }, x9, x8 +; CHECK-SVE2p1-NEXT: punpklo p1.h, p4.b +; CHECK-SVE2p1-NEXT: punpkhi p3.h, p4.b +; CHECK-SVE2p1-NEXT: punpklo p0.h, p1.b +; CHECK-SVE2p1-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE2p1-NEXT: punpklo p2.h, p3.b +; CHECK-SVE2p1-NEXT: punpkhi p3.h, p3.b +; CHECK-SVE2p1-NEXT: bl use +; CHECK-SVE2p1-NEXT: punpklo p1.h, p5.b +; CHECK-SVE2p1-NEXT: punpkhi p3.h, p5.b +; CHECK-SVE2p1-NEXT: punpklo p0.h, p1.b +; CHECK-SVE2p1-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE2p1-NEXT: punpklo p2.h, p3.b +; CHECK-SVE2p1-NEXT: punpkhi p3.h, p3.b +; CHECK-SVE2p1-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-SVE2p1-NEXT: b use + %r = call @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n) + %v0 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 0) + %v1 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 4) + %v2 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 8) + %v3 = call 
@llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 12) + tail call void @use( %v0, %v1, %v2, %v3) + %v4 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 16) + %v5 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 20) + %v6 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 24) + %v7 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 28) + tail call void @use( %v4, %v5, %v6, %v7) + ret void +} + +; Negative test for when not extracting exactly two halves of the source vector +define void @test_illegal_type_with_partial_extracts(i32 %i, i32 %n) #0 { +; CHECK-SVE-LABEL: test_illegal_type_with_partial_extracts: +; CHECK-SVE: // %bb.0: +; CHECK-SVE-NEXT: rdvl x8, #1 +; CHECK-SVE-NEXT: adds w8, w0, w8 +; CHECK-SVE-NEXT: csinv w8, w8, wzr, lo +; CHECK-SVE-NEXT: whilelo p0.b, w0, w1 +; CHECK-SVE-NEXT: whilelo p1.b, w8, w1 +; CHECK-SVE-NEXT: punpkhi p0.h, p0.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: b use +; +; CHECK-SVE2p1-LABEL: test_illegal_type_with_partial_extracts: +; CHECK-SVE2p1: // %bb.0: +; CHECK-SVE2p1-NEXT: mov w8, w1 +; CHECK-SVE2p1-NEXT: mov w9, w0 +; CHECK-SVE2p1-NEXT: whilelo { p2.b, p3.b }, x9, x8 +; CHECK-SVE2p1-NEXT: punpkhi p0.h, p2.b +; CHECK-SVE2p1-NEXT: punpkhi p1.h, p3.b +; CHECK-SVE2p1-NEXT: b use + %r = call @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n) + %v0 = call @llvm.vector.extract.nxv8i1.nxv32i1.i64( %r, i64 8) + %v1 = call @llvm.vector.extract.nxv8i1.nxv32i1.i64( %r, i64 24) + tail call void @use( %v0, %v1) + ret void +} + declare void @use(...) 
attributes #0 = { nounwind } From fa4d5b7c00da38027c9d1769ce1d157e0d2a5600 Mon Sep 17 00:00:00 2001 From: Kerry McLaughlin Date: Tue, 3 Jun 2025 16:52:07 +0000 Subject: [PATCH 2/5] - Add asserts for the result & operand types to ReplaceGetActiveLaneMaskResults - Rename new test with partial extracts --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 11 +++++++++-- .../CodeGen/AArch64/get-active-lane-mask-extract.ll | 7 +++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 4eb49b9fe025e..8cdd7fd6f74a7 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -27335,12 +27335,19 @@ void AArch64TargetLowering::ReplaceGetActiveLaneMaskResults( if (!Subtarget->hasSVE2p1()) return; + assert(N->getValueType(0) == MVT::nxv32i1 && + "Unexpected result type for get.active.lane.mask"); + SDLoc DL(N); SDValue Idx = N->getOperand(0); SDValue TC = N->getOperand(1); + + assert(Idx.getValueType().getFixedSizeInBits() <= 64 && + "Unexpected operand type for get.active.lane.mask"); + if (Idx.getValueType() != MVT::i64) { - Idx = DAG.getZExtOrTrunc(Idx, DL, MVT::i64); - TC = DAG.getZExtOrTrunc(TC, DL, MVT::i64); + Idx = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Idx); + TC = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, TC); } SDValue ID = diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll index 0b78dd963cbb0..7cc4bc0bdc62f 100644 --- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll +++ b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll @@ -272,9 +272,8 @@ define void @test_2x16bit_mask_with_32bit_index_and_trip_count_ext8(i32 %i, i32 ret void } -; Negative test for when not extracting exactly two halves of the source vector -define void @test_illegal_type_with_partial_extracts(i32 %i, i32 %n) #0 { -; 
CHECK-SVE-LABEL: test_illegal_type_with_partial_extracts: +define void @test_2x16bit_mask_with_32bit_index_and_trip_count_part_extracts(i32 %i, i32 %n) #0 { +; CHECK-SVE-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_part_extracts: ; CHECK-SVE: // %bb.0: ; CHECK-SVE-NEXT: rdvl x8, #1 ; CHECK-SVE-NEXT: adds w8, w0, w8 @@ -285,7 +284,7 @@ define void @test_illegal_type_with_partial_extracts(i32 %i, i32 %n) #0 { ; CHECK-SVE-NEXT: punpkhi p1.h, p1.b ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_illegal_type_with_partial_extracts: +; CHECK-SVE2p1-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_part_extracts: ; CHECK-SVE2p1: // %bb.0: ; CHECK-SVE2p1-NEXT: mov w8, w1 ; CHECK-SVE2p1-NEXT: mov w9, w0 From ead2b6d3fc8ef1e3fb99371fc9ce8a282957ca3a Mon Sep 17 00:00:00 2001 From: Kerry McLaughlin Date: Wed, 4 Jun 2025 10:36:21 +0000 Subject: [PATCH 3/5] - Guard setOperationAction with hasSVE2p1 & add assert --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 8cdd7fd6f74a7..0536efacf2792 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1501,7 +1501,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::GET_ACTIVE_LANE_MASK, VT, Legal); } - setOperationAction(ISD::GET_ACTIVE_LANE_MASK, MVT::nxv32i1, Custom); + if (Subtarget->hasSVE2p1()) + setOperationAction(ISD::GET_ACTIVE_LANE_MASK, MVT::nxv32i1, Custom); for (auto VT : {MVT::v16i8, MVT::v8i8, MVT::v4i16, MVT::v2i32}) setOperationAction(ISD::GET_ACTIVE_LANE_MASK, VT, Custom); @@ -27332,8 +27333,8 @@ void AArch64TargetLowering::ReplaceExtractSubVectorResults( void AArch64TargetLowering::ReplaceGetActiveLaneMaskResults( SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { - if (!Subtarget->hasSVE2p1()) - 
return; + assert(Subtarget->hasSVE2p1() && + "Custom lower of get.active.lane.mask missing required feature."); assert(N->getValueType(0) == MVT::nxv32i1 && "Unexpected result type for get.active.lane.mask"); From c71590a3c41cc2c7ab0917c29311d23dbd471d4a Mon Sep 17 00:00:00 2001 From: Kerry McLaughlin Date: Wed, 4 Jun 2025 13:10:23 +0000 Subject: [PATCH 4/5] - Extend to cover Subtarget.hasSME2() && Subtarget.isStreaming() - Remove unnecessary tests --- .../Target/AArch64/AArch64ISelLowering.cpp | 8 +- .../AArch64/get-active-lane-mask-extract.ll | 193 ++++++------------ 2 files changed, 66 insertions(+), 135 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 0536efacf2792..e87de292639dd 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1501,7 +1501,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::GET_ACTIVE_LANE_MASK, VT, Legal); } - if (Subtarget->hasSVE2p1()) + if (Subtarget->hasSVE2p1() || + (Subtarget->hasSME2() && Subtarget->isStreaming())) setOperationAction(ISD::GET_ACTIVE_LANE_MASK, MVT::nxv32i1, Custom); for (auto VT : {MVT::v16i8, MVT::v8i8, MVT::v4i16, MVT::v2i32}) @@ -18168,7 +18169,7 @@ performActiveLaneMaskCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, /*IsEqual=*/false)) return While; - if (!ST->hasSVE2p1()) + if (!ST->hasSVE2p1() && !(ST->hasSME2() && ST->isStreaming())) return SDValue(); if (!N->hasNUsesOfValue(2, 0)) @@ -27333,7 +27334,8 @@ void AArch64TargetLowering::ReplaceExtractSubVectorResults( void AArch64TargetLowering::ReplaceGetActiveLaneMaskResults( SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { - assert(Subtarget->hasSVE2p1() && + assert((Subtarget->hasSVE2p1() || + (Subtarget->hasSME2() && Subtarget->isStreaming())) && "Custom lower of get.active.lane.mask missing required feature."); assert(N->getValueType(0) == 
MVT::nxv32i1 && diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll index 7cc4bc0bdc62f..eb74184888ced 100644 --- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll +++ b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mattr=+sve < %s | FileCheck %s -check-prefix CHECK-SVE -; RUN: llc -mattr=+sve2p1 < %s | FileCheck %s -check-prefix CHECK-SVE2p1 +; RUN: llc -mattr=+sve2p1 < %s | FileCheck %s -check-prefix CHECK-SVE2p1-SME2 -check-prefix CHECK-SVE2p1 +; RUN: llc -mattr=+sve -mattr=+sme2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-SVE2p1-SME2 -check-prefix CHECK-SME2 target triple = "aarch64-linux" ; Test combining of getActiveLaneMask with a pair of extract_vector operations. @@ -13,12 +14,12 @@ define void @test_2x8bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) #0 ; CHECK-SVE-NEXT: punpkhi p1.h, p1.b ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_2x8bit_mask_with_32bit_index_and_trip_count: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: mov w8, w1 -; CHECK-SVE2p1-NEXT: mov w9, w0 -; CHECK-SVE2p1-NEXT: whilelo { p0.h, p1.h }, x9, x8 -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: mov w8, w1 +; CHECK-SVE2p1-SME2-NEXT: mov w9, w0 +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.h, p1.h }, x9, x8 +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 %i, i32 %n) %v0 = call @llvm.vector.extract.nxv8i1.nxv16i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv8i1.nxv16i1.i64( %r, i64 8) @@ -34,10 +35,10 @@ define void @test_2x8bit_mask_with_64bit_index_and_trip_count(i64 %i, i64 %n) #0 ; CHECK-SVE-NEXT: punpkhi p1.h, p1.b ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: 
test_2x8bit_mask_with_64bit_index_and_trip_count: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: whilelo { p0.h, p1.h }, x0, x1 -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_64bit_index_and_trip_count: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.h, p1.h }, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 %i, i64 %n) %v0 = call @llvm.vector.extract.nxv8i1.nxv16i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv8i1.nxv16i1.i64( %r, i64 8) @@ -53,12 +54,12 @@ define void @test_edge_case_2x1bit_mask(i64 %i, i64 %n) #0 { ; CHECK-SVE-NEXT: punpkhi p1.h, p1.b ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_edge_case_2x1bit_mask: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: whilelo p1.d, x0, x1 -; CHECK-SVE2p1-NEXT: punpklo p0.h, p1.b -; CHECK-SVE2p1-NEXT: punpkhi p1.h, p1.b -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_edge_case_2x1bit_mask: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: whilelo p1.d, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: punpklo p0.h, p1.b +; CHECK-SVE2p1-SME2-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 %i, i64 %n) %v0 = call @llvm.vector.extract.nxv1i1.nxv2i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv1i1.nxv2i1.i64( %r, i64 1) @@ -74,10 +75,10 @@ define void @test_boring_case_2x2bit_mask(i64 %i, i64 %n) #0 { ; CHECK-SVE-NEXT: punpkhi p1.h, p1.b ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_boring_case_2x2bit_mask: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: whilelo { p0.d, p1.d }, x0, x1 -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_boring_case_2x2bit_mask: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.d, p1.d }, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 %i, i64 %n) %v0 = call @llvm.vector.extract.nxv2i1.nxv4i1.i64( %r, i64 0) %v1 = call 
@llvm.vector.extract.nxv2i1.nxv4i1.i64( %r, i64 2) @@ -96,14 +97,14 @@ define void @test_partial_extract(i64 %i, i64 %n) #0 { ; CHECK-SVE-NEXT: punpklo p1.h, p2.b ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_partial_extract: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: whilelo p0.h, x0, x1 -; CHECK-SVE2p1-NEXT: punpklo p1.h, p0.b -; CHECK-SVE2p1-NEXT: punpkhi p2.h, p0.b -; CHECK-SVE2p1-NEXT: punpklo p0.h, p1.b -; CHECK-SVE2p1-NEXT: punpklo p1.h, p2.b -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_partial_extract: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: whilelo p0.h, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: punpklo p1.h, p0.b +; CHECK-SVE2p1-SME2-NEXT: punpkhi p2.h, p0.b +; CHECK-SVE2p1-SME2-NEXT: punpklo p0.h, p1.b +; CHECK-SVE2p1-SME2-NEXT: punpklo p1.h, p2.b +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 %i, i64 %n) %v0 = call @llvm.vector.extract.nxv2i1.nxv8i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv2i1.nxv8i1.i64( %r, i64 4) @@ -144,6 +145,21 @@ define void @test_fixed_extract(i64 %i, i64 %n) #0 { ; CHECK-SVE2p1-NEXT: mov v1.s[1], w11 ; CHECK-SVE2p1-NEXT: // kill: def $d1 killed $d1 killed $q1 ; CHECK-SVE2p1-NEXT: b use +; +; CHECK-SME2-LABEL: test_fixed_extract: +; CHECK-SME2: // %bb.0: +; CHECK-SME2-NEXT: whilelo p0.h, x0, x1 +; CHECK-SME2-NEXT: cset w8, mi +; CHECK-SME2-NEXT: mov z0.h, p0/z, #1 // =0x1 +; CHECK-SME2-NEXT: mov z1.h, z0.h[1] +; CHECK-SME2-NEXT: mov z2.h, z0.h[5] +; CHECK-SME2-NEXT: mov z3.h, z0.h[4] +; CHECK-SME2-NEXT: fmov s0, w8 +; CHECK-SME2-NEXT: zip1 z0.s, z0.s, z1.s +; CHECK-SME2-NEXT: zip1 z1.s, z3.s, z2.s +; CHECK-SME2-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-SME2-NEXT: // kill: def $d1 killed $d1 killed $z1 +; CHECK-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 %i, i64 %n) %v0 = call <2 x i1> @llvm.vector.extract.v2i1.nxv8i1.i64( %r, i64 0) %v1 = call <2 x i1> @llvm.vector.extract.v2i1.nxv8i1.i64( %r, i64 4) @@ 
-163,12 +179,12 @@ define void @test_2x16bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) # ; CHECK-SVE-NEXT: whilelo p1.b, w8, w1 ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: mov w8, w1 -; CHECK-SVE2p1-NEXT: mov w9, w0 -; CHECK-SVE2p1-NEXT: whilelo { p0.b, p1.b }, x9, x8 -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: mov w8, w1 +; CHECK-SVE2p1-SME2-NEXT: mov w9, w0 +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.b, p1.b }, x9, x8 +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n) %v0 = call @llvm.vector.extract.nxv16i1.nxv32i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv16i1.nxv32i1.i64( %r, i64 16) @@ -193,16 +209,16 @@ define void @test_2x32bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) # ; CHECK-SVE-NEXT: whilelo p2.b, w8, w1 ; CHECK-SVE-NEXT: b use ; -; CHECK-SVE2p1-LABEL: test_2x32bit_mask_with_32bit_index_and_trip_count: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: rdvl x8, #2 -; CHECK-SVE2p1-NEXT: mov w9, w1 -; CHECK-SVE2p1-NEXT: mov w10, w0 -; CHECK-SVE2p1-NEXT: adds w8, w0, w8 -; CHECK-SVE2p1-NEXT: csinv w8, w8, wzr, lo -; CHECK-SVE2p1-NEXT: whilelo { p0.b, p1.b }, x10, x9 -; CHECK-SVE2p1-NEXT: whilelo { p2.b, p3.b }, x8, x9 -; CHECK-SVE2p1-NEXT: b use +; CHECK-SVE2p1-SME2-LABEL: test_2x32bit_mask_with_32bit_index_and_trip_count: +; CHECK-SVE2p1-SME2: // %bb.0: +; CHECK-SVE2p1-SME2-NEXT: rdvl x8, #2 +; CHECK-SVE2p1-SME2-NEXT: mov w9, w1 +; CHECK-SVE2p1-SME2-NEXT: mov w10, w0 +; CHECK-SVE2p1-SME2-NEXT: adds w8, w0, w8 +; CHECK-SVE2p1-SME2-NEXT: csinv w8, w8, wzr, lo +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.b, p1.b }, x10, x9 +; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.b, p3.b }, x8, x9 +; CHECK-SVE2p1-SME2-NEXT: b use %r = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 %i, 
i32 %n) %v0 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 16) @@ -212,93 +228,6 @@ define void @test_2x32bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) # ret void } -define void @test_2x16bit_mask_with_32bit_index_and_trip_count_ext8(i32 %i, i32 %n) #0 { -; CHECK-SVE-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_ext8: -; CHECK-SVE: // %bb.0: -; CHECK-SVE-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; CHECK-SVE-NEXT: rdvl x8, #1 -; CHECK-SVE-NEXT: adds w8, w0, w8 -; CHECK-SVE-NEXT: csinv w8, w8, wzr, lo -; CHECK-SVE-NEXT: whilelo p0.b, w0, w1 -; CHECK-SVE-NEXT: whilelo p4.b, w8, w1 -; CHECK-SVE-NEXT: punpklo p1.h, p0.b -; CHECK-SVE-NEXT: punpkhi p3.h, p0.b -; CHECK-SVE-NEXT: punpklo p0.h, p1.b -; CHECK-SVE-NEXT: punpkhi p1.h, p1.b -; CHECK-SVE-NEXT: punpklo p2.h, p3.b -; CHECK-SVE-NEXT: punpkhi p3.h, p3.b -; CHECK-SVE-NEXT: bl use -; CHECK-SVE-NEXT: punpklo p1.h, p4.b -; CHECK-SVE-NEXT: punpkhi p3.h, p4.b -; CHECK-SVE-NEXT: punpklo p0.h, p1.b -; CHECK-SVE-NEXT: punpkhi p1.h, p1.b -; CHECK-SVE-NEXT: punpklo p2.h, p3.b -; CHECK-SVE-NEXT: punpkhi p3.h, p3.b -; CHECK-SVE-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload -; CHECK-SVE-NEXT: b use -; -; CHECK-SVE2p1-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_ext8: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-SVE2p1-NEXT: mov w8, w1 -; CHECK-SVE2p1-NEXT: mov w9, w0 -; CHECK-SVE2p1-NEXT: whilelo { p4.b, p5.b }, x9, x8 -; CHECK-SVE2p1-NEXT: punpklo p1.h, p4.b -; CHECK-SVE2p1-NEXT: punpkhi p3.h, p4.b -; CHECK-SVE2p1-NEXT: punpklo p0.h, p1.b -; CHECK-SVE2p1-NEXT: punpkhi p1.h, p1.b -; CHECK-SVE2p1-NEXT: punpklo p2.h, p3.b -; CHECK-SVE2p1-NEXT: punpkhi p3.h, p3.b -; CHECK-SVE2p1-NEXT: bl use -; CHECK-SVE2p1-NEXT: punpklo p1.h, p5.b -; CHECK-SVE2p1-NEXT: punpkhi p3.h, p5.b -; CHECK-SVE2p1-NEXT: punpklo p0.h, p1.b -; CHECK-SVE2p1-NEXT: punpkhi p1.h, p1.b -; CHECK-SVE2p1-NEXT: punpklo p2.h, p3.b -; CHECK-SVE2p1-NEXT: punpkhi p3.h, p3.b -; CHECK-SVE2p1-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload -; CHECK-SVE2p1-NEXT: b use - %r = call @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n) - %v0 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 0) - %v1 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 4) - %v2 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 8) - %v3 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 12) - tail call void @use( %v0, %v1, %v2, %v3) - %v4 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 16) - %v5 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 20) - %v6 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 24) - %v7 = call @llvm.vector.extract.nxv4i1.nxv32i1.i64( %r, i64 28) - tail call void @use( %v4, %v5, %v6, %v7) - ret void -} - -define void @test_2x16bit_mask_with_32bit_index_and_trip_count_part_extracts(i32 %i, i32 %n) #0 { -; CHECK-SVE-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count_part_extracts: -; CHECK-SVE: // %bb.0: -; CHECK-SVE-NEXT: rdvl x8, #1 -; CHECK-SVE-NEXT: adds w8, w0, w8 -; CHECK-SVE-NEXT: csinv w8, w8, wzr, lo -; CHECK-SVE-NEXT: whilelo p0.b, w0, w1 -; CHECK-SVE-NEXT: whilelo p1.b, w8, w1 -; CHECK-SVE-NEXT: punpkhi p0.h, p0.b -; CHECK-SVE-NEXT: punpkhi p1.h, p1.b -; CHECK-SVE-NEXT: b use -; -; CHECK-SVE2p1-LABEL: 
test_2x16bit_mask_with_32bit_index_and_trip_count_part_extracts: -; CHECK-SVE2p1: // %bb.0: -; CHECK-SVE2p1-NEXT: mov w8, w1 -; CHECK-SVE2p1-NEXT: mov w9, w0 -; CHECK-SVE2p1-NEXT: whilelo { p2.b, p3.b }, x9, x8 -; CHECK-SVE2p1-NEXT: punpkhi p0.h, p2.b -; CHECK-SVE2p1-NEXT: punpkhi p1.h, p3.b -; CHECK-SVE2p1-NEXT: b use - %r = call @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n) - %v0 = call @llvm.vector.extract.nxv8i1.nxv32i1.i64( %r, i64 8) - %v1 = call @llvm.vector.extract.nxv8i1.nxv32i1.i64( %r, i64 24) - tail call void @use( %v0, %v1) - ret void -} - declare void @use(...) attributes #0 = { nounwind } From 3e5f700f37f58de883f4720a0ed0ef57d90b5542 Mon Sep 17 00:00:00 2001 From: Kerry McLaughlin Date: Fri, 6 Jun 2025 09:42:25 +0000 Subject: [PATCH 5/5] - Fix intrinsic name in @test_2x32bit_mask_with_32bit_index_and_trip_count --- llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll index eb74184888ced..c76b50d69b877 100644 --- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll +++ b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll @@ -219,7 +219,7 @@ define void @test_2x32bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) # ; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.b, p1.b }, x10, x9 ; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.b, p3.b }, x8, x9 ; CHECK-SVE2p1-SME2-NEXT: b use - %r = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 %i, i32 %n) + %r = call @llvm.get.active.lane.mask.nxv64i1.i32(i32 %i, i32 %n) %v0 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 0) %v1 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 16) %v2 = call @llvm.vector.extract.nxv16i1.nxv64i1.i64( %r, i64 32)