From 9f6186c41f8dfbbf4e313cbcba807425f7f494f5 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 8 Dec 2025 14:07:45 -0800
Subject: [PATCH] [RISCV] Add VMNoV0 register class with only the VMaskVTs.

I plan to use this for inline assembly "vd" constraints with mask types
in a follow-up patch. Due to the test changes, I wanted to post this
separately.
---
 llvm/lib/Target/RISCV/RISCVRegisterInfo.td    |  1 +
 .../instruction-select/rvv/select.mir         | 20 ++++++++++----------
 .../RISCV/rvv/pass-fast-math-flags-sdnode.ll  |  2 +-
 .../RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir  |  4 ++--
 .../rvv/strided-vpload-vpstore-output.ll      |  2 +-
 .../RISCV/rvv/vleff-vlseg2ff-output.ll        |  4 ++--
 .../CodeGen/RISCV/rvv/vmerge-peephole.mir     |  4 ++--
 .../CodeGen/RISCV/rvv/vmv.v.v-peephole.mir    |  6 +++---
 .../RISCV/rvv/vsetvli-insert-crossbb.mir      |  4 ++--
 9 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 11b7a0a3c691a..f354793eb0eac 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -813,6 +813,7 @@ def VMV0 : VReg<VMaskVTs, (add V0), 1>;
 
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
+def VMNoV0 : VReg<VMaskVTs, (add VRNoV0), 1>;
 
 defvar VTupM1N2VTs = [riscv_nxv8i8x2, riscv_nxv4i8x2, riscv_nxv2i8x2, riscv_nxv1i8x2];
 defvar VTupM1N3VTs = [riscv_nxv8i8x3, riscv_nxv4i8x3, riscv_nxv2i8x3, riscv_nxv1i8x3];
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
index ada76a43639d7..b7cb295648b4e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
@@ -11,7 +11,7 @@ body: |
   bb.0.entry:
     ; RV32I-LABEL: name: select_nxv1i8
    ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
     ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -19,7 +19,7 @@ body: |
     ;
     ; RV64I-LABEL: name: select_nxv1i8
     ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
     ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -40,7 +40,7 @@ body: |
   bb.0.entry:
     ; RV32I-LABEL: name: select_nxv4i8
     ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
     ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -48,7 +48,7 @@ body: |
     ;
     ; RV64I-LABEL: name: select_nxv4i8
     ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
     ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -98,7 +98,7 @@ body: |
   bb.0.entry:
     ; RV32I-LABEL: name: select_nxv64i8
     ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
     ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -106,7 +106,7 @@ body: |
     ;
     ; RV64I-LABEL: name: select_nxv64i8
     ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
     ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -127,7 +127,7 @@ body: |
   bb.0.entry:
     ; RV32I-LABEL: name: select_nxv2i16
     ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
     ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -135,7 +135,7 @@ body: |
     ;
     ; RV64I-LABEL: name: select_nxv2i16
     ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
     ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -185,7 +185,7 @@ body: |
   bb.0.entry:
     ; RV32I-LABEL: name: select_nxv32i16
     ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
     ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
@@ -193,7 +193,7 @@ body: |
     ;
     ; RV64I-LABEL: name: select_nxv32i16
     ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
     ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
     ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
index 0654fe8bd8d66..3225d649f066e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %x, <vscale x 1 x double
   %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
index c73c2004834db..ece457a09dbdf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
@@ -11,7 +11,7 @@ body: |
     ; CHECK: liveins: $x1, $v8, $v9
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %false:vrnov0 = COPY $v8
-    ; CHECK-NEXT: %true:vrnov0 = COPY $v9
+    ; CHECK-NEXT: %true:vmnov0 = COPY $v9
     ; CHECK-NEXT: %avl:gprnox0 = COPY $x1
     ; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 0 /* e8 */
     ; CHECK-NEXT: $v0 = COPY %mask
@@ -135,7 +135,7 @@ body: |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %false:vrnov0 = COPY $v8
     ; CHECK-NEXT: %mask:vmv0 = COPY $v0
-    ; CHECK-NEXT: %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: %true:vmnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 4, 5 /* e32 */, 1 /* ta, mu */
     %false:vrnov0 = COPY $v8
     %mask:vmv0 = COPY $v0
     %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %mask, 4, 5 /* e32 */, 0 /* tu, mu */
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll
index f087efcc5f57b..d3649ef4b6664 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll
@@ -15,7 +15,7 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride,
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
   ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
-  ; CHECK-NEXT: [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 1 /* ta, mu */ :: (load unknown-size, align 1)
+  ; CHECK-NEXT: [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vmnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 1 /* ta, mu */ :: (load unknown-size, align 1)
   ; CHECK-NEXT: $v8 = COPY [[PseudoVLSE8_V_MF8_MASK]]
   ; CHECK-NEXT: PseudoRET implicit $v8
   %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index 6b6276b838fba..7cbaceae858b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -42,9 +42,9 @@ define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p,
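
Note on the intended use (not part of this diff): "vd" is the RISC-V
inline-assembly constraint for "any vector register except v0", and the
commit message plans to map it to VMNoV0 when the asm operand has a mask
type. The new class pairs the mask value types of VM with the v0
exclusion of VRNoV0, v0 being the register that masked instructions
implicitly read. Below is a minimal sketch of IR that such a follow-up
would affect, assuming the ^vd constraint spelling used by the existing
llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll; the function name
and the choice of vmand.mm are illustrative, not taken from the patch:

  ; Hypothetical example, not from this patch: mask-typed asm operands
  ; constrained with "vd". Once "vd" selects VMNoV0 for mask types, all
  ; three operands below must be allocated to vector registers other
  ; than v0. As usual for inline asm, the caller is responsible for
  ; having vtype configured appropriately.
  define <vscale x 8 x i1> @mask_and_asm(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) {
    %r = call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=^vd,^vd,^vd"(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b)
    ret <vscale x 8 x i1> %r
  }

Without a register class restricted to the VMaskVTs, "vd" on a mask type
previously had nothing suitable to map to; that class is what this patch
adds.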