diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index 72f1596d79a02..4fb71a3ed0006 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -1101,7 +1101,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
   if (RISCVII::hasRoundModeOp(TSFlags))
     --NumOps;
 
-  bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
+  bool hasVLOutput = RISCVInstrInfo::isFaultOnlyFirstLoad(*MI);
   for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
     const MachineOperand &MO = MI->getOperand(OpNo);
     // Skip vl output. It should be the second output.
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 53192e9dfe6c6..9a513891b765d 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1253,7 +1253,7 @@ void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
     return;
   }
 
-  if (RISCV::isFaultFirstLoad(MI)) {
+  if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
     // Update AVL to vl-output of the fault first load.
     assert(MI.getOperand(1).getReg().isVirtual());
     if (LIS) {
@@ -1756,7 +1756,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
   for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
     MachineInstr &MI = *I++;
-    if (RISCV::isFaultFirstLoad(MI)) {
+    if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
       Register VLOutput = MI.getOperand(1).getReg();
       assert(VLOutput.isVirtual());
       if (!MI.getOperand(1).isDead()) {
@@ -1774,6 +1774,7 @@ void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
       }
       // We don't use the vl output of the VLEFF/VLSEGFF anymore.
       MI.getOperand(1).setReg(RISCV::X0);
+      MI.addRegisterDefined(RISCV::VL, MRI->getTargetRegisterInfo());
     }
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 86a4e8e370ee6..e5d29e1a8b476 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -4568,11 +4568,6 @@ RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
   }
 }
 
-bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
-  return MI.getNumExplicitDefs() == 2 &&
-         MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) && !MI.isInlineAsm();
-}
-
 bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
   int16_t MI1FrmOpIdx =
       RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index b099acd81e995..8260949cf918a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -357,8 +357,6 @@ bool isRVVSpill(const MachineInstr &MI);
 std::optional<std::pair<unsigned, unsigned>>
 isRVVSpillForZvlsseg(unsigned Opcode);
 
-bool isFaultFirstLoad(const MachineInstr &MI);
-
 // Return true if both input instructions have equal rounding mode. If at least
 // one of the instructions does not have rounding mode, false will be returned.
 bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 281f8d55932b9..f9fc6f0be3804 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6144,8 +6144,6 @@ defm PseudoVSUX : VPseudoIStore;
 // 7.7. Unit-stride Fault-Only-First Loads
 //===----------------------------------------------------------------------===//
 
-// vleff may update VL register
-let Defs = [VL] in
 defm PseudoVL : VPseudoFFLoad;
 
 //===----------------------------------------------------------------------===//
@@ -6159,11 +6157,7 @@ defm PseudoVSSEG : VPseudoUSSegStore;
 defm PseudoVSSSEG : VPseudoSSegStore;
 defm PseudoVSOXSEG : VPseudoISegStore;
 defm PseudoVSUXSEG : VPseudoISegStore;
-
-// vlsegeff.v may update VL register
-let Defs = [VL] in {
 defm PseudoVLSEG : VPseudoUSSegLoadFF;
-}
 
 //===----------------------------------------------------------------------===//
 // 11. Vector Integer Arithmetic Instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
index 4c37cb7e393bf..1057eeee31d65 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
@@ -129,6 +129,13 @@ def isVSlideInstr
                              !instances("^PseudoVSLIDEUP_VI.*")
                            ])>>>;
 
+def isFaultOnlyFirstLoad
+    : TIIPredicate<"isFaultOnlyFirstLoad",
+                   MCReturnStatement<
+                       CheckOpcode<
+                           !instances(
+                               "^PseudoVL(SEG[2-8])?E(8|16|32|64)FF_V.*")>>>;
+
 def isNonZeroLoadImmediate
   : TIIPredicate<"isNonZeroLoadImmediate",
                  MCReturnStatement
 %passthru, ptr %p, i64 %vl) {
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -50,7 +50,7 @@ define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p,
, iXLen } @llvm.riscv.vleff.nxv1f64(
@@ -3034,8 +3031,6 @@ entry:
 define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
index d8bff08ea5513..333ba83f69eef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -66,8 +66,6 @@ entry:
 define void @test_vlseg2ff_dead_all(ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vlseg2ff_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vlseg2e16ff.v v8, (a0)
 ; CHECK-NEXT:    ret
 entry:
   tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 4)
@@ -77,8 +75,6 @@ entry:
 define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1, i32 4)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
index 05a5be295cc71..b9e45cc190a65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
@@ -66,8 +66,6 @@ entry:
 define void @test_vlseg2ff_dead_all(ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vlseg2ff_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vlseg2e16ff.v v8, (a0)
 ; CHECK-NEXT:    ret
 entry:
   tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %vl, i64 4)
@@ -77,8 +75,6 @@ entry:
 define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1, i64 4)