diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7ae68ebadd3e8..f43c120dc1946 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1670,7 +1670,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                              : RISCV::PseudoVMSLT_VX_##suffix;                \
     VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix                \
                              : RISCV::PseudoVMSGT_VX_##suffix;                \
-    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                           \
+    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix_b;                         \
     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                            \
     break;
       CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B64)
@@ -1770,13 +1770,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                           \
     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                               \
     break;
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
-      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, B64)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, B32)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, B16)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, B8)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, B4)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, B2)
+      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, B1)
 #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
     }
     SDValue SEW = CurDAG->getTargetConstant(
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4e25d683faee9..81b93ade92576 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2273,11 +2273,10 @@ multiclass VPseudoBinaryV_VI_RM
 }
 
 multiclass VPseudoVALU_MM<bit Commutable = 0> {
-  foreach m = MxList in {
-    defvar mx = m.MX;
-    let VLMul = m.value, isCommutable = Commutable in {
-      def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
-                        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mx>;
+  foreach mti = AllMasks in {
+    let VLMul = mti.LMul.value, isCommutable = Commutable in {
+      def "_MM_" # mti.BX : VPseudoBinaryNoMask<VR, VR, VR, "">,
+                            SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
     }
   }
 }
@@ -4950,7 +4949,7 @@ multiclass VPatBinaryV_VI_RM
 
 multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
   foreach mti = AllMasks in
     let Predicates = [HasVInstructions] in
-    def : VPatBinaryM<intrinsic # "_mm", instruction # "_MM_" # mti.LMul.MX,
+    def : VPatBinaryM<intrinsic # "_mm", instruction # "_MM_" # mti.BX,
                       mti.Mask, mti.Mask, mti.Mask, mti.Log2SEW, VR, VR, VR>;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 021c4b3b724b0..880ea0ae0a976 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1141,35 +1141,35 @@ defm : VPatAVGADD_VV_VX_RM
 foreach mti = AllMasks in {
   let Predicates = [HasVInstructions] in {
     def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
-              (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMAND_MM_"#mti.BX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
-              (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMOR_MM_"#mti.BX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
-              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.BX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
-              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
(!cast("PseudoVMNOR_MM_"#mti.LMul.MX) + (!cast("PseudoVMNOR_MM_"#mti.BX) VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))), - (!cast("PseudoVMXNOR_MM_"#mti.LMul.MX) + (!cast("PseudoVMXNOR_MM_"#mti.BX) VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))), - (!cast("PseudoVMANDN_MM_"#mti.LMul.MX) + (!cast("PseudoVMANDN_MM_"#mti.BX) VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))), - (!cast("PseudoVMORN_MM_"#mti.LMul.MX) + (!cast("PseudoVMORN_MM_"#mti.BX) VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; // Handle rvv_vnot the same as the vmnot.m pseudoinstruction. def : Pat<(mti.Mask (rvv_vnot VR:$rs)), - (!cast("PseudoVMNAND_MM_"#mti.LMul.MX) + (!cast("PseudoVMNAND_MM_"#mti.BX) VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>; } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index e48a6f9309294..2026ba79e623d 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -2699,51 +2699,51 @@ foreach mti = AllMasks in { (!cast("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)), - (!cast("PseudoVMAND_MM_" # mti.LMul.MX) + (!cast("PseudoVMAND_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)), - (!cast("PseudoVMOR_MM_" # mti.LMul.MX) + (!cast("PseudoVMOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)), - (!cast("PseudoVMXOR_MM_" # mti.LMul.MX) + (!cast("PseudoVMXOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, (riscv_vmnot_vl VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMANDN_MM_" # mti.LMul.MX) + (!cast("PseudoVMANDN_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, (riscv_vmnot_vl VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMORN_MM_" # mti.LMul.MX) + (!cast("PseudoVMORN_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; // XOR is associative so we need 2 patterns for VMXNOR. def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1, VLOpFrag), VR:$rs2, VLOpFrag)), - (!cast("PseudoVMXNOR_MM_" # mti.LMul.MX) + (!cast("PseudoVMXNOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMNAND_MM_" # mti.LMul.MX) + (!cast("PseudoVMNAND_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMNOR_MM_" # mti.LMul.MX) + (!cast("PseudoVMNOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMXNOR_MM_" # mti.LMul.MX) + (!cast("PseudoVMXNOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; // Match the not idiom to the vmnot.m pseudo. 
   def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
-            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
+            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
             VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
 
   // 15.2 Vector count population in mask vcpop.m
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 912fc2ccbd127..5f23823d10103 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -547,19 +547,19 @@ name: vmop_mm
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm
-    ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
-    %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
-    %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
+    ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+    %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
+    %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_mm_incompatible_eew
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_incompatible_eew
-    ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+    ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
     ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
-    %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
 ...
 ---
@@ -567,19 +567,19 @@ name: vmop_mm_incompatible_emul
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_incompatible_emul
-    ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
-    %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
-    %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
+    ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+    %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
+    %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_mm_mask
 body: |
   bb.0:
    ; CHECK-LABEL: name: vmop_mm_mask
-    ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+    ; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
     ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
-    %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
 ...
 ---
@@ -587,9 +587,9 @@ name: vmop_mm_mask_larger_emul_user
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_mask_larger_emul_user
-    ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+    ; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
     ; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
-    %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
 ...
 ---
@@ -597,9 +597,9 @@ name: vmop_mm_mask_incompatible_emul
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_mask_incompatible_emul
-    ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+    ; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
     ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
-    %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
 ...
 ---
@@ -608,9 +608,9 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_vv
    ; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 3 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
     %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
-    %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
+    %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_vv_maskuser
@@ -638,9 +638,9 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_vv_incompatible_emul
     ; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
     %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
-    %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
+    %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_vv_maskuser_incompaible_emul
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 6f97abcd0fade..449ee44d5dc5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -822,7 +822,7 @@ body: |
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   %mask:vr = PseudoVMANDN_MM_B64 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   BEQ %a, $x0, %bb.3
   ; CHECK-NEXT:   PseudoBR %bb.2
   ; CHECK-NEXT: {{  $}}
@@ -857,7 +857,7 @@ body: |
   bb.1:
     successors: %bb.3, %bb.2
 
-    %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0
+    %mask:vr = PseudoVMANDN_MM_B64 %t6, %t3, -1, 0
    %t2:gpr = COPY $x0
     BEQ %a, %t2, %bb.3
     PseudoBR %bb.2