@@ -1534,20 +1534,6 @@ static unsigned getMovOpc(bool IsScalar) {
   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
 }
 
-static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
-  MI.setDesc(NewDesc);
-
-  // Remove any leftover implicit operands from mutating the instruction. e.g.
-  // if we replace an s_and_b32 with a copy, we don't need the implicit scc def
-  // anymore.
-  const MCInstrDesc &Desc = MI.getDesc();
-  unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() +
-                    Desc.implicit_defs().size();
-
-  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
-    MI.removeOperand(I);
-}
-
 std::optional<int64_t>
 SIFoldOperandsImpl::getImmOrMaterializedImm(MachineOperand &Op) const {
   if (Op.isImm())
@@ -1586,7 +1572,8 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
        Opc == AMDGPU::S_NOT_B32) &&
       Src0Imm) {
     MI->getOperand(1).ChangeToImmediate(~*Src0Imm);
-    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
+    TII->mutateAndCleanupImplicit(
+        *MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
     return true;
   }
 
@@ -1614,7 +1601,7 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
     // instruction.
     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
     MI->removeOperand(Src1Idx);
-    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
+    TII->mutateAndCleanupImplicit(*MI, TII->get(getMovOpc(IsSGPR)));
     return true;
   }
 
@@ -1634,11 +1621,12 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
     if (Src1Val == 0) {
       // y = or x, 0 => y = copy x
       MI->removeOperand(Src1Idx);
-      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
+      TII->mutateAndCleanupImplicit(*MI, TII->get(AMDGPU::COPY));
     } else if (Src1Val == -1) {
       // y = or x, -1 => y = v_mov_b32 -1
       MI->removeOperand(Src1Idx);
-      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
+      TII->mutateAndCleanupImplicit(
+          *MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
     } else
       return false;
 
@@ -1650,11 +1638,12 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
     if (Src1Val == 0) {
       // y = and x, 0 => y = v_mov_b32 0
       MI->removeOperand(Src0Idx);
-      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
+      TII->mutateAndCleanupImplicit(
+          *MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
     } else if (Src1Val == -1) {
       // y = and x, -1 => y = copy x
       MI->removeOperand(Src1Idx);
-      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
+      TII->mutateAndCleanupImplicit(*MI, TII->get(AMDGPU::COPY));
     } else
       return false;
 
@@ -1666,7 +1655,7 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
     if (Src1Val == 0) {
       // y = xor x, 0 => y = copy x
       MI->removeOperand(Src1Idx);
-      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
+      TII->mutateAndCleanupImplicit(*MI, TII->get(AMDGPU::COPY));
       return true;
     }
   }
@@ -1712,7 +1701,7 @@ bool SIFoldOperandsImpl::tryFoldCndMask(MachineInstr &MI) const {
     MI.removeOperand(Src1ModIdx);
   if (Src0ModIdx != -1)
     MI.removeOperand(Src0ModIdx);
-  mutateCopyOp(MI, NewDesc);
+  TII->mutateAndCleanupImplicit(MI, NewDesc);
   LLVM_DEBUG(dbgs() << MI);
   return true;
 }
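
For context, the call sites above suggest the deleted static mutateCopyOp helper was promoted to a SIInstrInfo member named mutateAndCleanupImplicit so it can be shared outside this file. A minimal sketch of that member, assuming its body is unchanged from the removed helper; the method name comes from the call sites, but its exact signature and placement in SIInstrInfo.cpp are assumptions, not shown in these hunks:

// Sketch (assumed): the old static helper hoisted onto SIInstrInfo.
// Name taken from the call sites above; signature is an assumption.
void SIInstrInfo::mutateAndCleanupImplicit(MachineInstr &MI,
                                           const MCInstrDesc &NewDesc) const {
  MI.setDesc(NewDesc);

  // Remove any leftover implicit operands from mutating the instruction,
  // e.g. if we replace an s_and_b32 with a copy, we don't need the implicit
  // scc def anymore.
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() +
                    Desc.implicit_defs().size();

  // Operands past the descriptor's declared explicit + implicit count are
  // stale implicits from the old opcode; drop them from the tail.
  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.removeOperand(I);
}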