@@ -69,74 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction. Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -166,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-         MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-         MI.getOperand(1).getReg() == RISCV::X0 &&
-         MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers. Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
@@ -458,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -469,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1. We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -478,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
@@ -496,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   // non-zero VL. We could generalize this if we had a VL > C predicate.
   // * The LMUL1 restriction is for machines whose latency may depend on VL.
   // * As above, this is only legal for tail "undefined" not "agnostic".
-  if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-      hasUndefinedPassthru(MI)) {
+  if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+      VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
     Res.VLAny = false;
     Res.VLZeroness = true;
     Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -510,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   // its place. Since a splat is non-constant time in LMUL, we do need to be
   // careful to not increase the number of active vector registers (unlike for
   // vmv.s.x.)
-  if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-      hasUndefinedPassthru(MI)) {
+  if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+      VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
     Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
-    if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+    if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+        !ST->hasVInstructionsF64())
      Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
    else
      Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -651,7 +578,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
       if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -979,7 +906,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1085,7 +1012,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI));
     // Pick a random value for state tracking purposes, will be ignored via
     // the demanded fields mechanism
     InstrInfo.setAVLImm(1);
@@ -1126,7 +1053,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
   // same, we can use the X0, X0 form.
   if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
     if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-        DefMI && isVectorConfigInstr(*DefMI)) {
+        DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
       VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
       if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
         auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1304,7 +1231,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1339,7 +1266,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1429,7 +1357,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI, make sure it matches the output of the
@@ -1460,7 +1388,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1660,12 +1588,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the latter for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
         return false;
       if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                        LIS))
@@ -1716,7 +1644,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
           MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1740,7 +1668,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
     }
 
     if (canMutatePriorConfig(MI, *NextMI, Used)) {
-      if (!isVLPreservingConfig(*NextMI)) {
+      if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
         Register DefReg = NextMI->getOperand(0).getReg();
 
         MI.getOperand(0).setReg(DefReg);