diff --git a/llvm/include/llvm/CodeGen/RegisterPressure.h b/llvm/include/llvm/CodeGen/RegisterPressure.h
index 407afee343ce2..88f43116c0b46 100644
--- a/llvm/include/llvm/CodeGen/RegisterPressure.h
+++ b/llvm/include/llvm/CodeGen/RegisterPressure.h
@@ -278,7 +278,7 @@ class LiveRegSet {
 
   unsigned getSparseIndexFromReg(Register Reg) const {
     if (Reg.isVirtual())
-      return Register::virtReg2Index(Reg) + NumRegUnits;
+      return Reg.virtRegIndex() + NumRegUnits;
     assert(Reg < NumRegUnits);
     return Reg;
   }
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 63460f5a0dae3..114149ff53d85 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -1383,9 +1383,7 @@ class BitMaskClassIterator {
 // This is useful when building IndexedMaps keyed on virtual registers
 struct VirtReg2IndexFunctor {
   using argument_type = Register;
-  unsigned operator()(Register Reg) const {
-    return Register::virtReg2Index(Reg);
-  }
+  unsigned operator()(Register Reg) const { return Reg.virtRegIndex(); }
 };
 
 /// Prints virtual and physical registers with or without a TRI instance.
diff --git a/llvm/lib/CodeGen/DetectDeadLanes.cpp b/llvm/lib/CodeGen/DetectDeadLanes.cpp
index 86e9f3abe010d..a6d2640ed044f 100644
--- a/llvm/lib/CodeGen/DetectDeadLanes.cpp
+++ b/llvm/lib/CodeGen/DetectDeadLanes.cpp
@@ -118,7 +118,7 @@ void DeadLaneDetector::addUsedLanesOnOperand(const MachineOperand &MO,
     UsedLanes = TRI->composeSubRegIndexLaneMask(MOSubReg, UsedLanes);
   UsedLanes &= MRI->getMaxLaneMaskForVReg(MOReg);
 
-  unsigned MORegIdx = Register::virtReg2Index(MOReg);
+  unsigned MORegIdx = MOReg.virtRegIndex();
   DeadLaneDetector::VRegInfo &MORegInfo = VRegInfos[MORegIdx];
   LaneBitmask PrevUsedLanes = MORegInfo.UsedLanes;
   // Any change at all?
@@ -147,7 +147,7 @@ DeadLaneDetector::transferUsedLanes(const MachineInstr &MI,
                                     const MachineOperand &MO) const {
   unsigned OpNum = MO.getOperandNo();
   assert(lowersToCopies(MI) &&
-         DefinedByCopy[Register::virtReg2Index(MI.getOperand(0).getReg())]);
+         DefinedByCopy[MI.getOperand(0).getReg().virtRegIndex()]);
 
   switch (MI.getOpcode()) {
   case TargetOpcode::COPY:
@@ -204,7 +204,7 @@ void DeadLaneDetector::transferDefinedLanesStep(const MachineOperand &Use,
   Register DefReg = Def.getReg();
   if (!DefReg.isVirtual())
     return;
-  unsigned DefRegIdx = Register::virtReg2Index(DefReg);
+  unsigned DefRegIdx = DefReg.virtRegIndex();
   if (!DefinedByCopy.test(DefRegIdx))
     return;
 
@@ -433,7 +433,7 @@ bool DetectDeadLanes::isUndefInput(const DeadLaneDetector &DLD,
   Register DefReg = Def.getReg();
   if (!DefReg.isVirtual())
     return false;
-  unsigned DefRegIdx = Register::virtReg2Index(DefReg);
+  unsigned DefRegIdx = DefReg.virtRegIndex();
   if (!DLD.isDefinedByCopy(DefRegIdx))
     return false;
 
@@ -506,7 +506,7 @@ DetectDeadLanes::modifySubRegisterOperandStatus(const DeadLaneDetector &DLD,
         Register Reg = MO.getReg();
         if (!Reg.isVirtual())
           continue;
-        unsigned RegIdx = Register::virtReg2Index(Reg);
+        unsigned RegIdx = Reg.virtRegIndex();
         const DeadLaneDetector::VRegInfo &RegInfo = DLD.getVRegInfo(RegIdx);
         if (MO.isDef() && !MO.isDead() && RegInfo.UsedLanes.none()) {
           LLVM_DEBUG(dbgs()
diff --git a/llvm/lib/CodeGen/InitUndef.cpp b/llvm/lib/CodeGen/InitUndef.cpp
index 5fe52035d063f..6c0e9f9e930b9 100644
--- a/llvm/lib/CodeGen/InitUndef.cpp
+++ b/llvm/lib/CodeGen/InitUndef.cpp
@@ -142,8 +142,7 @@ bool InitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
     Register Reg = UseMO.getReg();
     if (NewRegs.count(Reg))
       continue;
-    DeadLaneDetector::VRegInfo Info =
-        DLD.getVRegInfo(Register::virtReg2Index(Reg));
+    DeadLaneDetector::VRegInfo Info = DLD.getVRegInfo(Reg.virtRegIndex());
 
     if (Info.UsedLanes == Info.DefinedLanes)
       continue;
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index db65ebae7a7e9..8509369fe09c8 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -3178,7 +3178,7 @@ struct VRegFilter {
     for (Register Reg : FromRegSet) {
       if (!Reg.isVirtual())
         continue;
-      unsigned Index = Register::virtReg2Index(Reg);
+      unsigned Index = Reg.virtRegIndex();
       if (Index < SparseUniverseMax) {
         if (Index < SparseUniverse && Sparse.test(Index))
           continue;
@@ -3201,7 +3201,7 @@ struct VRegFilter {
     Dense.reserve(NewDenseSize);
     for (unsigned I = Begin; I < End; ++I) {
       Register Reg = ToVRegs[I];
-      unsigned Index = Register::virtReg2Index(Reg);
+      unsigned Index = Reg.virtRegIndex();
       if (Index < SparseUniverseMax)
         Sparse.set(Index);
       else
diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp
index e2309b65cf9a2..14128dafbe4ee 100644
--- a/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -207,9 +207,7 @@ class RegAllocFastImpl {
 
     explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
 
-    unsigned getSparseSetIndex() const {
-      return Register::virtReg2Index(VirtReg);
-    }
+    unsigned getSparseSetIndex() const { return VirtReg.virtRegIndex(); }
   };
 
   using LiveRegMap = SparseSet<LiveReg, identity<unsigned>, uint16_t>;
@@ -349,11 +347,11 @@ class RegAllocFastImpl {
   unsigned calcSpillCost(MCPhysReg PhysReg) const;
 
   LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
-    return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
+    return LiveVirtRegs.find(VirtReg.virtRegIndex());
   }
 
   LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
-    return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
+    return LiveVirtRegs.find(VirtReg.virtRegIndex());
   }
 
   void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
@@ -493,7 +491,7 @@ static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A,
 
 /// Returns false if \p VirtReg is known to not live out of the current block.
 bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
-  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
+  if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex())) {
     // Cannot be live-out if there are no successors.
     return !MBB->succ_empty();
   }
@@ -506,7 +504,7 @@ bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
     // Find the first def in the self loop MBB.
     for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
       if (DefInst.getParent() != MBB) {
-        MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
         return true;
       } else {
         if (!SelfLoopDef || dominates(PosIndexes, DefInst, *SelfLoopDef))
@@ -514,7 +512,7 @@ bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
       }
     }
     if (!SelfLoopDef) {
-      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
       return true;
     }
   }
@@ -525,7 +523,7 @@ bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
   unsigned C = 0;
   for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
     if (UseInst.getParent() != MBB || ++C >= Limit) {
-      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
       // Cannot be live-out if there are no successors.
       return !MBB->succ_empty();
     }
@@ -535,7 +533,7 @@ bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
       // value inside a self looping block.
      if (SelfLoopDef == &UseInst ||
          !dominates(PosIndexes, *SelfLoopDef, UseInst)) {
-        MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
         return true;
       }
     }
@@ -546,7 +544,7 @@ bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
 
 /// Returns false if \p VirtReg is known to not be live into the current block.
 bool RegAllocFastImpl::mayLiveIn(Register VirtReg) {
-  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
+  if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex()))
     return !MBB->pred_empty();
 
   // See if the first \p Limit def of the register are all in the current block.
@@ -554,7 +552,7 @@ bool RegAllocFastImpl::mayLiveIn(Register VirtReg) {
   unsigned C = 0;
   for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
     if (DefInst.getParent() != MBB || ++C >= Limit) {
-      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
       return !MBB->pred_empty();
     }
   }
diff --git a/llvm/lib/CodeGen/RegisterScavenging.cpp b/llvm/lib/CodeGen/RegisterScavenging.cpp
index 126f8e6d039d5..350ae2048bfa4 100644
--- a/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -412,8 +412,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
       // We only care about virtual registers and ignore virtual registers
       // created by the target callbacks in the process (those will be handled
      // in a scavenging round).
-      if (!Reg.isVirtual() ||
-          Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
+      if (!Reg.isVirtual() || Reg.virtRegIndex() >= InitialNumVirtRegs)
        continue;
       if (!MO.readsReg())
         continue;
@@ -432,8 +431,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
         continue;
       Register Reg = MO.getReg();
       // Only vregs, no newly created vregs (see above).
-      if (!Reg.isVirtual() ||
-          Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
+      if (!Reg.isVirtual() || Reg.virtRegIndex() >= InitialNumVirtRegs)
         continue;
       // We have to look at all operands anyway so we can precalculate here
       // whether there is a reading operand. This allows use to skip the use
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 899f83bbc6064..66db2ae993de8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -698,7 +698,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
         Def->getParent()->insert(std::next(InsertPos), MI);
       } else
         LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
-                          << Register::virtReg2Index(Reg) << "\n");
+                          << printReg(Reg) << '\n');
     }
 
     // Don't try and extend through copies in instruction referencing mode.
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index ba528f66980fa..db40d5880672e 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -116,7 +116,7 @@ Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
       if (Name != "") {
         OS << '%' << Name;
       } else {
-        OS << '%' << Register::virtReg2Index(Reg);
+        OS << '%' << Reg.virtRegIndex();
       }
     } else if (!TRI)
       OS << '$' << "physreg" << Reg.id();
diff --git a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
index f819540391801..51bf4b8c30044 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
@@ -284,9 +284,8 @@ std::optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
         WorkList.push_back(DstReg);
         continue;
       }
-      LLVM_DEBUG(dbgs() << "[" << ST.FI << ":" << ST.Tag << "] use of %"
-                        << Register::virtReg2Index(UseReg) << " in " << UseI
-                        << "\n");
+      LLVM_DEBUG(dbgs() << "[" << ST.FI << ":" << ST.Tag << "] use of "
+                        << printReg(UseReg) << " in " << UseI << "\n");
       Score++;
     }
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index b29cd86cc3412..2231fcd0c51c5 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -264,7 +264,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
       }
       if (MRI->getRegClass(T) != DoubleRC)
         continue;
-      unsigned u = Register::virtReg2Index(T);
+      unsigned u = T.virtRegIndex();
       if (FixedRegs[u])
         continue;
       LLVM_DEBUG(dbgs() << ' ' << printReg(T, TRI));
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index 43d81f64613b7..1b6da5781ac6b 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -187,12 +187,11 @@ struct PPCMIPeephole : public MachineFunctionPass {
 
 #define addRegToUpdate(R) addRegToUpdateWithLine(R, __LINE__)
 void PPCMIPeephole::addRegToUpdateWithLine(Register Reg, int Line) {
-  if (!Register::isVirtualRegister(Reg))
+  if (!Reg.isVirtual())
     return;
   if (RegsToUpdate.insert(Reg).second)
-    LLVM_DEBUG(dbgs() << "Adding register: " << Register::virtReg2Index(Reg)
-                      << " on line " << Line
-                      << " for re-computation of kill flags\n");
+    LLVM_DEBUG(dbgs() << "Adding register: " << printReg(Reg) << " on line "
+                      << Line << " for re-computation of kill flags\n");
 }
 
 // Initialize class variables.
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index d60b59e73b462..5c1f036abef5a 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -344,7 +344,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
           const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
           Register NewReg = MRI.createVirtualRegister(RC);
           auto InsertPt = std::next(MI.getIterator());
-          if (UseEmpty[Register::virtReg2Index(OldReg)]) {
+          if (UseEmpty[OldReg.virtRegIndex()]) {
             unsigned Opc = getDropOpcode(RC);
             MachineInstr *Drop =
                 BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc))
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index 7dc5c099c1270..6efe09d775c4b 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -311,8 +311,8 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
     // If we reassigned the stack pointer, update the debug frame base info.
     if (Old != New && MFI.isFrameBaseVirtual() && MFI.getFrameBaseVreg() == Old)
       MFI.setFrameBaseVreg(New);
-    LLVM_DEBUG(dbgs() << "Assigning vreg" << Register::virtReg2Index(LI->reg())
-                      << " to vreg" << Register::virtReg2Index(New) << "\n");
+    LLVM_DEBUG(dbgs() << "Assigning vreg " << printReg(LI->reg()) << " to vreg "
+                      << printReg(New) << "\n");
   }
   if (!Changed)
     return false;
diff --git a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
index 3812ea0ebd3f3..4486a1cae782c 100644
--- a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
+++ b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
@@ -136,12 +136,12 @@ int X86FastPreTileConfig::getStackSpaceFor(Register VirtReg) {
 /// If \p VirtReg live out of the current MBB, it must live out of the current
 /// config
 bool X86FastPreTileConfig::mayLiveOut(Register VirtReg, MachineInstr *CfgMI) {
-  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
+  if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex()))
     return true;
 
   for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
     if (UseInst.getParent() != MBB) {
-      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
       return true;
     }
 
@@ -150,7 +150,7 @@ bool X86FastPreTileConfig::mayLiveOut(Register VirtReg, MachineInstr *CfgMI) {
     // tile register.
     if (CfgMI) {
       if (dominates(*MBB, *CfgMI, UseInst)) {
-        MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
         return true;
       }
     }
@@ -355,7 +355,7 @@ void X86FastPreTileConfig::convertPHI(MachineBasicBlock *MBB,
     // Mark it as liveout, so that it will be spilled when visit
     // the incoming MBB. Otherwise since phi will be deleted, it
     // would miss spill when visit incoming MBB.
-    MayLiveAcrossBlocks.set(Register::virtReg2Index(InTileReg));
+    MayLiveAcrossBlocks.set(InTileReg.virtRegIndex());
     MachineBasicBlock *InMBB = PHI.getOperand(I + 1).getMBB();
 
     MachineInstr *TileDefMI = MRI->getVRegDef(InTileReg);