diff --git a/llvm/include/llvm/CodeGen/LiveRegUnits.h b/llvm/include/llvm/CodeGen/LiveRegUnits.h
index e96165d6b3bbf..e38aab6d51475 100644
--- a/llvm/include/llvm/CodeGen/LiveRegUnits.h
+++ b/llvm/include/llvm/CodeGen/LiveRegUnits.h
@@ -112,13 +112,13 @@ class LiveRegUnits {
   /// The regmask has the same format as the one in the RegMask machine operand.
   void addRegsInMask(const uint32_t *RegMask);
 
-  /// Returns true if no part of physical register \p Reg is live.
-  bool available(MCPhysReg Reg) const {
-    for (MCRegUnit Unit : TRI->regunits(Reg)) {
-      if (Units.test(Unit))
-        return false;
-    }
-    return true;
+  /// Returns true if no part of physical register \p Reg is live or reserved.
+  bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const;
+
+  /// Returns true if any part of physical register \p Reg is live.
+  bool contains(MCPhysReg Reg) const {
+    return llvm::any_of(TRI->regunits(Reg),
+                        [&](MCRegUnit Unit) { return Units.test(Unit); });
   }
 
   /// Updates liveness when stepping backwards over the instruction \p MI.
diff --git a/llvm/include/llvm/CodeGen/MachineOutliner.h b/llvm/include/llvm/CodeGen/MachineOutliner.h
index d0ff02fea4ff9..f6ecb890accd1 100644
--- a/llvm/include/llvm/CodeGen/MachineOutliner.h
+++ b/llvm/include/llvm/CodeGen/MachineOutliner.h
@@ -156,7 +156,7 @@ struct Candidate {
                                     const TargetRegisterInfo &TRI) {
     if (!FromEndOfBlockToStartOfSeqWasSet)
       initFromEndOfBlockToStartOfSeq(TRI);
-    return FromEndOfBlockToStartOfSeq.available(Reg);
+    return !FromEndOfBlockToStartOfSeq.contains(Reg);
   }
 
   /// \returns True if `isAvailableAcrossAndOutOfSeq` fails for any register
@@ -166,7 +166,7 @@ struct Candidate {
     if (!FromEndOfBlockToStartOfSeqWasSet)
       initFromEndOfBlockToStartOfSeq(TRI);
     return any_of(Regs, [&](Register Reg) {
-      return !FromEndOfBlockToStartOfSeq.available(Reg);
+      return FromEndOfBlockToStartOfSeq.contains(Reg);
     });
   }
 
@@ -181,7 +181,7 @@ struct Candidate {
   bool isAvailableInsideSeq(Register Reg, const TargetRegisterInfo &TRI) {
     if (!InSeqWasSet)
       initInSeq(TRI);
-    return InSeq.available(Reg);
+    return !InSeq.contains(Reg);
   }
 
   /// The number of instructions that would be saved by outlining every
diff --git a/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index 6a7de3b241fee..e3c5c57ec068c 100644
--- a/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -79,7 +79,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
       Register Reg = MO.getReg();
       if (Reg.isPhysical()) {
         // Don't delete live physreg defs, or any reserved register defs.
-        if (!LivePhysRegs.available(Reg) || MRI->isReserved(Reg))
+        if (!LivePhysRegs.available(*MRI, Reg))
           return false;
       } else {
         if (MO.isDead()) {
diff --git a/llvm/lib/CodeGen/LiveRegUnits.cpp b/llvm/lib/CodeGen/LiveRegUnits.cpp
index 34de09dd2944b..d711cd44f260d 100644
--- a/llvm/lib/CodeGen/LiveRegUnits.cpp
+++ b/llvm/lib/CodeGen/LiveRegUnits.cpp
@@ -41,6 +41,11 @@ void LiveRegUnits::addRegsInMask(const uint32_t *RegMask) {
   }
 }
 
+bool LiveRegUnits::available(const MachineRegisterInfo &MRI,
+                             MCPhysReg Reg) const {
+  return !MRI.isReserved(Reg) && !contains(Reg);
+}
+
 void LiveRegUnits::stepBackward(const MachineInstr &MI) {
   // Remove defined registers and regmask kills from the set.
   for (const MachineOperand &MOP : MI.operands()) {
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index b4cbb93d758ef..c73d17dc2f1a6 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -1627,7 +1627,7 @@ static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
                                   const TargetRegisterInfo *TRI) {
   LiveRegUnits LiveInRegUnits(*TRI);
   LiveInRegUnits.addLiveIns(MBB);
-  return !LiveInRegUnits.available(Reg);
+  return LiveInRegUnits.contains(Reg);
 }
 
 static MachineBasicBlock *
@@ -1680,7 +1680,7 @@ static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
   for (auto U : UsedOpsInCopy) {
     MachineOperand &MO = MI->getOperand(U);
     Register SrcReg = MO.getReg();
-    if (!UsedRegUnits.available(SrcReg)) {
+    if (UsedRegUnits.contains(SrcReg)) {
      MachineBasicBlock::iterator NI = std::next(MI->getIterator());
      for (MachineInstr &UI : make_range(NI, CurBB.end())) {
        if (UI.killsRegister(SrcReg, TRI)) {
@@ -1725,7 +1725,7 @@ static bool hasRegisterDependency(MachineInstr *MI,
     if (!Reg)
       continue;
     if (MO.isDef()) {
-      if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
+      if (ModifiedRegUnits.contains(Reg) || UsedRegUnits.contains(Reg)) {
         HasRegDependency = true;
         break;
       }
@@ -1736,7 +1736,7 @@ static bool hasRegisterDependency(MachineInstr *MI,
       // it's not perfectly clear if skipping the internal read is safe in all
       // other targets.
     } else if (MO.isUse()) {
-      if (!ModifiedRegUnits.available(Reg)) {
+      if (ModifiedRegUnits.contains(Reg)) {
         HasRegDependency = true;
         break;
       }
diff --git a/llvm/lib/CodeGen/RegisterScavenging.cpp b/llvm/lib/CodeGen/RegisterScavenging.cpp
index e6ff5701bc4bd..938fc9ba215c7 100644
--- a/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -109,7 +109,7 @@ void RegScavenger::backward() {
 bool RegScavenger::isRegUsed(Register Reg, bool includeReserved) const {
   if (isReserved(Reg))
     return includeReserved;
-  return !LiveUnits.available(Reg);
+  return LiveUnits.contains(Reg);
 }
 
 Register RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
@@ -164,8 +164,8 @@ findSurvivorBackwards(const MachineRegisterInfo &MRI,
     if (I == To) {
       // See if one of the registers in RC wasn't used so far.
       for (MCPhysReg Reg : AllocationOrder) {
-        if (!MRI.isReserved(Reg) && Used.available(Reg) &&
-            LiveOut.available(Reg))
+        if (!MRI.isReserved(Reg) && !Used.contains(Reg) &&
+            !LiveOut.contains(Reg))
           return std::make_pair(Reg, MBB.end());
       }
       // Otherwise we will continue up to InstrLimit instructions to find
@@ -186,10 +186,10 @@ findSurvivorBackwards(const MachineRegisterInfo &MRI,
         MI.getFlag(MachineInstr::FrameSetup))
       break;
 
-    if (Survivor == 0 || !Used.available(Survivor)) {
+    if (Survivor == 0 || Used.contains(Survivor)) {
       MCPhysReg AvilableReg = 0;
       for (MCPhysReg Reg : AllocationOrder) {
-        if (!MRI.isReserved(Reg) && Used.available(Reg)) {
+        if (Used.available(MRI, Reg)) {
           AvilableReg = Reg;
           break;
         }
diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index ea8e30269ece2..17e5a60438f90 100644
--- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -518,7 +518,7 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
   unsigned RegClassID = ChainBegin->getDesc().operands()[0].RegClass;
   auto Ord = RCI.getOrder(TRI->getRegClass(RegClassID));
   for (auto Reg : Ord) {
-    if (!Units.available(Reg))
+    if (Units.contains(Reg))
       continue;
     if (C == getColor(Reg))
       return Reg;
diff --git a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
index 32686b25f2118..7d481efb0136e 100644
--- a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
@@ -748,7 +748,7 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
       }
 
       for (unsigned ScratchReg : AArch64::GPR64RegClass) {
-        if (!LR.available(ScratchReg) || MRI.isReserved(ScratchReg))
+        if (!LR.available(MRI, ScratchReg))
           continue;
 
         LoadInfo NewLdI(LdI);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index a41ac0e44a770..61294bb2043bc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -7838,8 +7838,8 @@ AArch64InstrInfo::getOutlinableRanges(MachineBasicBlock &MBB,
   // where these registers are dead. We will only outline from those ranges.
   LiveRegUnits LRU(getRegisterInfo());
   auto AreAllUnsafeRegsDead = [&LRU]() {
-    return LRU.available(AArch64::W16) && LRU.available(AArch64::W17) &&
-           LRU.available(AArch64::NZCV);
+    return !LRU.contains(AArch64::W16) && !LRU.contains(AArch64::W17) &&
+           !LRU.contains(AArch64::NZCV);
   };
 
   // We need to know if LR is live across an outlining boundary later on in
@@ -7909,7 +7909,7 @@ AArch64InstrInfo::getOutlinableRanges(MachineBasicBlock &MBB,
       CreateNewRangeStartingAt(MI.getIterator());
       continue;
     }
-    LRAvailableEverywhere &= LRU.available(AArch64::LR);
+    LRAvailableEverywhere &= !LRU.contains(AArch64::LR);
     RangeBegin = MI.getIterator();
     ++RangeLen;
   }
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 41af5522d967d..14d1e873dbac2 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1265,7 +1265,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
         BaseReg == AArch64InstrInfo::getLdStBaseOp(MI).getReg() &&
         AArch64InstrInfo::getLdStOffsetOp(MI).isImm() &&
         isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
-        ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
+        !ModifiedRegUnits.contains(getLdStRegOp(MI).getReg())) {
       StoreI = MBBI;
       return true;
     }
@@ -1278,7 +1278,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
 
     // Otherwise, if the base register is modified, we have no match, so
     // return early.
-    if (!ModifiedRegUnits.available(BaseReg))
+    if (ModifiedRegUnits.contains(BaseReg))
       return false;
 
     // If we encounter a store aliased with the load, return early.
@@ -1510,7 +1510,7 @@ static std::optional<MCPhysReg> tryToFindRegisterToRename(
 
     auto *RegClass = TRI->getMinimalPhysRegClass(Reg);
     for (const MCPhysReg &PR : *RegClass) {
-      if (DefinedInBB.available(PR) && UsedInBetween.available(PR) &&
+      if (!DefinedInBB.contains(PR) && !UsedInBetween.contains(PR) &&
           !RegInfo.isReserved(PR) && !AnySubOrSuperRegCalleePreserved(PR) &&
           CanBeUsedForAllClasses(PR)) {
         DefinedInBB.addReg(PR);
@@ -1615,9 +1615,9 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // can't be paired: bail and keep looking.
       if (IsPreLdSt) {
         bool IsOutOfBounds = MIOffset != TII->getMemScale(MI);
-        bool IsBaseRegUsed = !UsedRegUnits.available(
+        bool IsBaseRegUsed = UsedRegUnits.contains(
             AArch64InstrInfo::getLdStBaseOp(MI).getReg());
-        bool IsBaseRegModified = !ModifiedRegUnits.available(
+        bool IsBaseRegModified = ModifiedRegUnits.contains(
             AArch64InstrInfo::getLdStBaseOp(MI).getReg());
         // If the stored value and the address of the second instruction is
         // the same, it needs to be using the updated register and therefore
@@ -1694,16 +1694,16 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       //   ldr x2 [x3]
      //   ldr x4 [x2, #8],
      // the first and third ldr cannot be converted to ldp x1, x4, [x2]
-      if (!ModifiedRegUnits.available(BaseReg))
+      if (ModifiedRegUnits.contains(BaseReg))
        return E;
 
      // If the Rt of the second instruction was not modified or used between
      // the two instructions and none of the instructions between the second
      // and first alias with the second, we can combine the second into the
      // first.
-      if (ModifiedRegUnits.available(getLdStRegOp(MI).getReg()) &&
+      if (!ModifiedRegUnits.contains(getLdStRegOp(MI).getReg()) &&
           !(MI.mayLoad() &&
-            !UsedRegUnits.available(getLdStRegOp(MI).getReg())) &&
+            UsedRegUnits.contains(getLdStRegOp(MI).getReg())) &&
           !mayAlias(MI, MemInsns, AA)) {
 
         Flags.setMergeForward(false);
@@ -1716,10 +1716,10 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // first and the second alias with the first, we can combine the first
       // into the second.
       if (!(MayLoad &&
-            !UsedRegUnits.available(getLdStRegOp(FirstMI).getReg())) &&
+            UsedRegUnits.contains(getLdStRegOp(FirstMI).getReg())) &&
           !mayAlias(FirstMI, MemInsns, AA)) {
 
-        if (ModifiedRegUnits.available(getLdStRegOp(FirstMI).getReg())) {
+        if (!ModifiedRegUnits.contains(getLdStRegOp(FirstMI).getReg())) {
           Flags.setMergeForward(true);
           Flags.clearRenameReg();
           return MBBI;
@@ -1761,7 +1761,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
 
     // Otherwise, if the base register is modified, we have no match, so
     // return early.
-    if (!ModifiedRegUnits.available(BaseReg))
+    if (ModifiedRegUnits.contains(BaseReg))
       return E;
 
     // Update list of instructions that read/write memory.
@@ -1987,8 +1987,8 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
     // return early.
     // If we are optimizing SP, do not allow instructions that may load or store
     // in between the load and the optimized value update.
-    if (!ModifiedRegUnits.available(BaseReg) ||
-        !UsedRegUnits.available(BaseReg) ||
+    if (ModifiedRegUnits.contains(BaseReg) ||
+        UsedRegUnits.contains(BaseReg) ||
         (BaseRegSP && MBBI->mayLoadOrStore()))
       return E;
   }
@@ -2062,8 +2062,8 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
 
     // Otherwise, if the base register is used or modified, we have no match, so
     // return early.
-    if (!ModifiedRegUnits.available(BaseReg) ||
-        !UsedRegUnits.available(BaseReg))
+    if (ModifiedRegUnits.contains(BaseReg) ||
+        UsedRegUnits.contains(BaseReg))
       return E;
     // Keep track if we have a memory access before an SP pre-increment, in this
     // case we need to validate later that the update amount respects the red
diff --git a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index 1494312886a40..a4fc06cce5bd4 100644
--- a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -190,7 +190,7 @@ bool AArch64RedundantCopyElimination::knownRegValInBlock(
     // register of the compare is not modified (including a self-clobbering
     // compare) between the compare and conditional branch we known the value
     // of the 1st source operand.
-    if (PredI.getOperand(2).isImm() && DomBBClobberedRegs.available(SrcReg) &&
+    if (PredI.getOperand(2).isImm() && !DomBBClobberedRegs.contains(SrcReg) &&
         SrcReg != DstReg) {
       // We've found the instruction that sets NZCV.
       int32_t KnownImm = PredI.getOperand(2).getImm();
@@ -210,7 +210,7 @@ bool AArch64RedundantCopyElimination::knownRegValInBlock(
 
     // The destination register must not be modified between the NZCV setting
     // instruction and the conditional branch.
-    if (!DomBBClobberedRegs.available(DstReg))
+    if (DomBBClobberedRegs.contains(DstReg))
       return Res;
 
     FirstUse = PredI;
@@ -254,7 +254,7 @@ bool AArch64RedundantCopyElimination::knownRegValInBlock(
 
     // The destination register of the NZCV setting instruction must not be
     // modified before the conditional branch.
-    if (!DomBBClobberedRegs.available(DstReg))
+    if (DomBBClobberedRegs.contains(DstReg))
       return false;
 
     // We've found the instruction that sets NZCV whose DstReg == 0.
@@ -323,12 +323,12 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
         MCPhysReg CopyDstReg = PredI->getOperand(0).getReg();
         MCPhysReg CopySrcReg = PredI->getOperand(1).getReg();
         for (auto &KnownReg : KnownRegs) {
-          if (!OptBBClobberedRegs.available(KnownReg.Reg))
+          if (OptBBClobberedRegs.contains(KnownReg.Reg))
             continue;
           // If we have X = COPY Y, and Y is known to be zero, then now X is
           // known to be zero.
           if (CopySrcReg == KnownReg.Reg &&
-              OptBBClobberedRegs.available(CopyDstReg)) {
+              !OptBBClobberedRegs.contains(CopyDstReg)) {
             KnownRegs.push_back(RegImm(CopyDstReg, KnownReg.Imm));
             if (SeenFirstUse)
               FirstUse = PredI;
@@ -337,7 +337,7 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
           // If we have X = COPY Y, and X is known to be zero, then now Y is
           // known to be zero.
           if (CopyDstReg == KnownReg.Reg &&
-              OptBBClobberedRegs.available(CopySrcReg)) {
+              !OptBBClobberedRegs.contains(CopySrcReg)) {
             KnownRegs.push_back(RegImm(CopySrcReg, KnownReg.Imm));
             if (SeenFirstUse)
               FirstUse = PredI;
@@ -354,7 +354,7 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
                                             OptBBUsedRegs, TRI);
       // Stop if all of the known-zero regs have been clobbered.
       if (all_of(KnownRegs, [&](RegImm KnownReg) {
-            return !OptBBClobberedRegs.available(KnownReg.Reg);
+            return OptBBClobberedRegs.contains(KnownReg.Reg);
           }))
         break;
     }
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
index 94584e20f5ab3..ff3404b6584ac 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
@@ -198,7 +198,7 @@ bool AArch64PostSelectOptimize::optimizeNZCVDefs(MachineBasicBlock &MBB) {
   LRU.addLiveOuts(MBB);
 
   for (auto &II : instructionsWithoutDebug(MBB.rbegin(), MBB.rend())) {
-    bool NZCVDead = LRU.available(AArch64::NZCV);
+    bool NZCVDead = !LRU.contains(AArch64::NZCV);
     if (NZCVDead && II.definesRegister(AArch64::NZCV)) {
       // The instruction defines NZCV, but NZCV is dead.
       unsigned NewOpc = getNonFlagSettingVariant(II.getOpcode());
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 1ffdde0360cf6..f087ce71f8ba7 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -6259,8 +6259,8 @@ bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
     LRU.accumulate(MI);
 
   // Check if each of the unsafe registers are available...
-  bool R12AvailableInBlock = LRU.available(ARM::R12);
-  bool CPSRAvailableInBlock = LRU.available(ARM::CPSR);
+  bool R12AvailableInBlock = !LRU.contains(ARM::R12);
+  bool CPSRAvailableInBlock = !LRU.contains(ARM::CPSR);
 
   // If all of these are dead (and not live out), we know we don't have to check
   // them later.
@@ -6272,9 +6272,9 @@ bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
 
   // If any of these registers is available in the MBB, but also a live out of
   // the block, then we know outlining is unsafe.
-  if (R12AvailableInBlock && !LRU.available(ARM::R12))
+  if (R12AvailableInBlock && LRU.contains(ARM::R12))
     return false;
-  if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
+  if (CPSRAvailableInBlock && LRU.contains(ARM::CPSR))
     return false;
 
   // Check if there's a call inside this MachineBasicBlock. If there is, then
@@ -6287,7 +6287,7 @@ bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
   bool LRIsAvailable =
       MBB.isReturnBlock() && !MBB.back().isCall()
           ? isLRAvailable(getRegisterInfo(), MBB.rbegin(), MBB.rend())
-          : LRU.available(ARM::LR);
+          : !LRU.contains(ARM::LR);
   if (!LRIsAvailable)
     Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
 
diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index a3f31df368c56..debd6e003d521 100644
--- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -316,7 +316,7 @@ bool HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 
       Register R = BI.getOperand(0).getReg();
       if (R.isPhysical()) {
-        if (Defs.available(R))
+        if (!Defs.contains(R))
           ReuseBP = R;
       } else if (R.isVirtual()) {
         // Extending a range of a virtual register can be dangerous,
diff --git a/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp b/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
index 6c1b0cf5ca7fe..0ba4215c2582a 100644
--- a/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
@@ -150,9 +150,9 @@ RISCVMoveMerge::findMatchingInst(MachineBasicBlock::iterator &MBBI,
       // If paired destination register was modified or used, the source reg
       // was modified, there is no possibility of finding matching
       // instruction so exit early.
-      if (!ModifiedRegUnits.available(DestReg) ||
-          !UsedRegUnits.available(DestReg) ||
-          !ModifiedRegUnits.available(SourceReg))
+      if (ModifiedRegUnits.contains(DestReg) ||
+          UsedRegUnits.contains(DestReg) ||
+          ModifiedRegUnits.contains(SourceReg))
         return E;
 
       return I;
@@ -162,9 +162,9 @@ RISCVMoveMerge::findMatchingInst(MachineBasicBlock::iterator &MBBI,
           (RegPair.Destination->getReg() == DestReg))
         return E;
 
-      if (!ModifiedRegUnits.available(DestReg) ||
-          !UsedRegUnits.available(DestReg) ||
-          !ModifiedRegUnits.available(SourceReg))
+      if (ModifiedRegUnits.contains(DestReg) ||
+          UsedRegUnits.contains(DestReg) ||
+          ModifiedRegUnits.contains(SourceReg))
         return E;
 
       return I;
diff --git a/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
index f885adca669fe..f6c5a0bddd752 100644
--- a/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
@@ -100,8 +100,8 @@ bool RISCVPushPopOpt::adjustRetVal(MachineBasicBlock::iterator &MBBI) {
     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
     // If a0 was modified or used, there is no possibility
     // of using ret_val slot of POP instruction.
-    if (!ModifiedRegUnits.available(RISCV::X10) ||
-        !UsedRegUnits.available(RISCV::X10))
+    if (ModifiedRegUnits.contains(RISCV::X10) ||
+        UsedRegUnits.contains(RISCV::X10))
       return false;
   }
   return false;
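
Note for reviewers: a minimal sketch of how the two queries in the revised API divide the work. This is hypothetical caller code, not part of the patch; the helper name `canClobberAtEntry` and its use of X16-style scratch scanning are illustrative only.

```cpp
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"

using namespace llvm;

// Hypothetical helper: returns true if \p Reg may be clobbered at the
// entry of \p MBB, i.e. it is neither live-in nor reserved.
static bool canClobberAtEntry(MachineBasicBlock &MBB, MCPhysReg Reg) {
  const MachineFunction &MF = *MBB.getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveRegUnits LRU(*MF.getSubtarget().getRegisterInfo());

  // Compute liveness at the top of the block by walking it backwards.
  LRU.addLiveOuts(MBB);
  for (MachineInstr &MI : llvm::reverse(MBB))
    LRU.stepBackward(MI);

  // contains(Reg) is the pure liveness query (any regunit of Reg live);
  // available(MRI, Reg) additionally rejects reserved registers — the old
  // available(Reg) plus the MRI.isReserved(Reg) check that callers such as
  // DeadMachineInstructionElim and findSurvivorBackwards spelled out by hand.
  return LRU.available(MRI, Reg);
}
```

With this split, pure liveness tests in the patch become `contains(Reg)` (negated where the old code used `available(Reg)`), and only the call sites that also guarded against reserved registers switch to the new two-argument `available(MRI, Reg)`.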