diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index e55b8f6652e31..79174066d377f 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2664,15 +2664,15 @@ LSRInstance::OptimizeLoopTermCond() {
     // Conservatively avoid trying to use the post-inc value in non-latch
     // exits if there may be pre-inc users in intervening blocks.
     if (LatchBlock != ExitingBlock)
-      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
+      for (const IVStrideUse &UI : IU)
         // Test if the use is reachable from the exiting block. This dominator
         // query is a conservative approximation of reachability.
-        if (&*UI != CondUse &&
-            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
+        if (&UI != CondUse &&
+            !DT.properlyDominates(UI.getUser()->getParent(), ExitingBlock)) {
           // Conservatively assume there may be reuse if the quotient of their
           // strides could be a legal scale.
           const SCEV *A = IU.getStride(*CondUse, L);
-          const SCEV *B = IU.getStride(*UI, L);
+          const SCEV *B = IU.getStride(UI, L);
           if (!A || !B) continue;
           if (SE.getTypeSizeInBits(A->getType()) !=
               SE.getTypeSizeInBits(B->getType())) {
@@ -2693,9 +2693,9 @@ LSRInstance::OptimizeLoopTermCond() {
                 C->getValue().isMinSignedValue())
               goto decline_post_inc;
             // Check for possible scaled-address reuse.
-            if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) {
-              MemAccessTy AccessTy = getAccessType(
-                  TTI, UI->getUser(), UI->getOperandValToReplace());
+            if (isAddressUse(TTI, UI.getUser(), UI.getOperandValToReplace())) {
+              MemAccessTy AccessTy =
+                  getAccessType(TTI, UI.getUser(), UI.getOperandValToReplace());
               int64_t Scale = C->getSExtValue();
               if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
                                             /*BaseOffset=*/0,