@@ -69,78 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVExtractInstr(const MachineInstr &MI) {
-  return RISCV::getRVVMCOpcode(MI.getOpcode()) == RISCV::RI_VEXTRACT;
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction. Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -170,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-         MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-         MI.getOperand(1).getReg() == RISCV::X0 &&
-         MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers. Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
@@ -462,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -473,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1. We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -482,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
@@ -500,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     //   non-zero VL. We could generalize this if we had a VL > C predicate.
     // * The LMUL1 restriction is for machines whose latency may depend on VL.
     // * As above, this is only legal for tail "undefined" not "agnostic".
-    if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.VLAny = false;
       Res.VLZeroness = true;
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -514,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // it's place. Since a splat is non-constant time in LMUL, we do need to be
     // careful to not increase the number of active vector registers (unlike for
     // vmv.s.x.)
-    if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
       Res.SEWLMULRatio = false;
       Res.VLAny = false;
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -542,7 +465,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     Res.MaskPolicy = false;
   }
 
-  if (isVExtractInstr(MI)) {
+  if (RISCVInstrInfo::isVExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     // TODO: LMUL can be any larger value (without cost)
     Res.TailPolicy = false;
@@ -661,7 +584,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
       if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -989,7 +912,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1095,7 +1018,8 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI) || isVExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI) ||
+           RISCVInstrInfo::isVExtractInstr(MI));
     // Pick a random value for state tracking purposes, will be ignored via
     // the demanded fields mechanism
     InstrInfo.setAVLImm(1);
@@ -1136,7 +1060,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
     // same, we can use the X0, X0 form.
     if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
       if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-          DefMI && isVectorConfigInstr(*DefMI)) {
+          DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
         VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
         if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
           auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1314,7 +1238,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1349,7 +1273,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1439,7 +1364,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI make sure it matches the output of the
@@ -1470,7 +1395,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1677,12 +1602,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the later for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
         return false;
       if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                        LIS))
@@ -1733,7 +1658,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
           MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1757,7 +1682,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
       }
 
       if (canMutatePriorConfig(MI, *NextMI, Used)) {
-        if (!isVLPreservingConfig(*NextMI)) {
+        if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
           Register DefReg = NextMI->getOperand(0).getReg();
 
           MI.getOperand(0).setReg(DefReg);
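For context, the call sites above now reach the removed file-local statics through the RISCVInstrInfo namespace. A minimal sketch of the declarations those calls assume follows; the exact header location and upstream signatures are not shown in this diff, so treat the placement (presumably llvm/lib/Target/RISCV/RISCVInstrInfo.h) as an assumption.

// Sketch (assumption): shared declarations for the helpers removed above, so
// that RISCVInsertVSETVLI.cpp can call RISCVInstrInfo::isVectorConfigInstr(MI)
// and friends. Names follow the call sites in this diff; the real header may
// differ in placement and documentation.
namespace llvm {
class MachineInstr;

namespace RISCVInstrInfo {
// vsetvli/vsetivli pseudos that write VL and VTYPE.
bool isVectorConfigInstr(const MachineInstr &MI);
// 'vsetvli x0, x0, vtype': preserves VL and only sets VTYPE.
bool isVLPreservingConfig(const MachineInstr &MI);
bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI);
bool isVExtractInstr(const MachineInstr &MI);
bool isScalarExtractInstr(const MachineInstr &MI);
bool isScalarInsertInstr(const MachineInstr &MI);
bool isScalarSplatInstr(const MachineInstr &MI);
bool isVSlideInstr(const MachineInstr &MI);
// 'addi rd, x0, imm' with a non-zero immediate.
bool isNonZeroLoadImmediate(const MachineInstr &MI);
} // namespace RISCVInstrInfo
} // namespace llvm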