@@ -5907,6 +5907,8 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  const DebugLoc &DL = MI.getDebugLoc();
 
   switch (MI.getOpcode()) {
   case AMDGPU::WAVE_REDUCE_UMIN_PSEUDO_U32:
@@ -5947,7 +5949,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B64);
   case AMDGPU::S_UADDO_PSEUDO:
   case AMDGPU::S_USUBO_PSEUDO: {
-    const DebugLoc &DL = MI.getDebugLoc();
     MachineOperand &Dest0 = MI.getOperand(0);
     MachineOperand &Dest1 = MI.getOperand(1);
     MachineOperand &Src0 = MI.getOperand(2);
@@ -5975,9 +5976,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   }
   case AMDGPU::V_ADD_U64_PSEUDO:
   case AMDGPU::V_SUB_U64_PSEUDO: {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-    const DebugLoc &DL = MI.getDebugLoc();
-
     bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO);
 
     MachineOperand &Dest = MI.getOperand(0);
@@ -6070,9 +6068,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     // This pseudo has a chance to be selected
     // only from uniform add/subcarry node. All the VGPR operands
     // therefore assumed to be splat vectors.
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
     MachineBasicBlock::iterator MII = MI;
-    const DebugLoc &DL = MI.getDebugLoc();
     MachineOperand &Dest = MI.getOperand(0);
     MachineOperand &CarryDest = MI.getOperand(1);
     MachineOperand &Src0 = MI.getOperand(2);
@@ -6136,7 +6132,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     // clang-format on
 
     unsigned SelOpc =
-        (ST.isWave64()) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
+        ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
 
     BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
         .addImm(-1)
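The `S_CSELECT` above is the idiom that broadcasts the SCC carry bit into a full lane mask: the instruction writes its first source when SCC is set and its second otherwise, so selecting between -1 and 0 yields an all-ones or all-zero mask. A scalar C++ analogue, purely illustrative and not LLVM or AMDGPU API:

```cpp
#include <cstdint>

// Illustrative analogue of "S_CSELECT_B64 dst, -1, 0": broadcast the
// one-bit SCC carry into a full-width mask. Hypothetical helper only.
uint64_t sccToMask(bool SCC, bool Wave64) {
  uint64_t AllOnes = Wave64 ? ~uint64_t{0} : uint64_t{UINT32_MAX};
  return SCC ? AllOnes : 0;
}
```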
@@ -6165,7 +6161,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::GET_GROUPSTATICSIZE: {
     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
-    DebugLoc DL = MI.getDebugLoc();
     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
         .add(MI.getOperand(0))
         .addImm(MFI->getLDSSize());
@@ -6174,8 +6169,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   }
   case AMDGPU::GET_SHADERCYCLESHILO: {
     assert(MF->getSubtarget<GCNSubtarget>().hasShaderCyclesHiLoRegisters());
-    MachineRegisterInfo &MRI = MF->getRegInfo();
-    const DebugLoc &DL = MI.getDebugLoc();
     // The algorithm is:
     //
     // hi1 = getreg(SHADER_CYCLES_HI)
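The comment is cut off by the hunk's context window; in the surrounding source it goes on to read SHADER_CYCLES_LO and then SHADER_CYCLES_HI again, using hi2:lo1 when the two HI reads match and hi2:0 when they do not. A minimal runnable sketch of that hi/lo/hi pattern, with std::atomic counters as stand-ins for the hardware registers (the real lowering emits S_GETREG_B32 instead):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-ins for the SHADER_CYCLES_HI/LO hardware registers.
std::atomic<uint32_t> CyclesHi{0}, CyclesLo{0};

uint64_t readShaderCycles() {
  uint32_t Hi1 = CyclesHi.load();
  uint32_t Lo1 = CyclesLo.load();
  uint32_t Hi2 = CyclesHi.load();
  // If HI changed between the two reads, LO wrapped inside the window,
  // so hi2:0 is still a value the counter actually passed through.
  return (uint64_t{Hi2} << 32) | (Hi1 == Hi2 ? Lo1 : 0);
}

int main() { std::printf("%llu\n", (unsigned long long)readShaderCycles()); }
```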
@@ -6238,12 +6231,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::SI_KILL_I1_PSEUDO:
     return splitKillBlock(MI, BB);
   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-
     Register Dst = MI.getOperand(0).getReg();
     const MachineOperand &Src0 = MI.getOperand(1);
     const MachineOperand &Src1 = MI.getOperand(2);
-    const DebugLoc &DL = MI.getDebugLoc();
     Register SrcCond = MI.getOperand(3).getReg();
 
     Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
@@ -6296,7 +6286,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return BB;
   }
   case AMDGPU::SI_BR_UNDEF: {
-    const DebugLoc &DL = MI.getDebugLoc();
     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
                            .add(MI.getOperand(0));
     Br->getOperand(1).setIsUndef(); // read undef SCC
@@ -6312,8 +6301,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return BB;
   }
   case AMDGPU::SI_CALL_ISEL: {
-    const DebugLoc &DL = MI.getDebugLoc();
-
     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
 
     MachineInstrBuilder MIB;
@@ -6330,7 +6317,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::V_SUB_CO_U32_e32:
   case AMDGPU::V_SUBREV_CO_U32_e32: {
     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
-    const DebugLoc &DL = MI.getDebugLoc();
     unsigned Opc = MI.getOpcode();
 
     bool NeedClampOperand = false;
@@ -6411,7 +6397,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     }
 
     if (SetRoundOp || SetDenormOp) {
-      MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
       MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
       if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
         unsigned ImmVal = Def->getOperand(1).getImm();
@@ -6448,7 +6433,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     MI.setDesc(TII->get(AMDGPU::COPY));
     return BB;
   case AMDGPU::ENDPGM_TRAP: {
-    const DebugLoc &DL = MI.getDebugLoc();
     if (BB->succ_empty() && std::next(MI.getIterator()) == BB->end()) {
       MI.setDesc(TII->get(AMDGPU::S_ENDPGM));
       MI.addOperand(MachineOperand::CreateImm(0));
@@ -6475,7 +6459,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   }
   case AMDGPU::SIMULATED_TRAP: {
     assert(Subtarget->hasPrivEnabledTrap2NopBug());
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
     MachineBasicBlock *SplitBB =
         TII->insertSimulatedTrap(MRI, *BB, MI, MI.getDebugLoc());
     MI.eraseFromParent();
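Taken together, the hunks make one mechanical change: `MRI` and `DL` are now obtained once at the top of `EmitInstrWithCustomInserter` rather than re-derived inside each `case`. One subtlety worth noting: `DL` is a reference into `MI`, so any case that calls `MI.eraseFromParent()` must finish using `DL` first, which every hunk above does. A compilable toy of the before/after shape, using plain stand-in types rather than LLVM's:

```cpp
#include <cstdio>

struct Instr { int Opcode; int Loc; }; // stand-in for MachineInstr

// Before: each case re-derived the same local (as the removed lines did).
int emitBefore(const Instr &MI) {
  switch (MI.Opcode) {
  case 0: {
    int DL = MI.Loc; // duplicated per case
    return DL + 1;
  }
  case 1: {
    int DL = MI.Loc; // duplicated again
    return DL + 2;
  }
  }
  return 0;
}

// After: hoisted once before the switch, mirroring the added lines.
int emitAfter(const Instr &MI) {
  int DL = MI.Loc; // every case can use it
  switch (MI.Opcode) {
  case 0:
    return DL + 1;
  case 1:
    return DL + 2;
  }
  return 0;
}

int main() {
  Instr MI{1, 41};
  std::printf("%d %d\n", emitBefore(MI), emitAfter(MI));
  return 0;
}
```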