@@ -77,11 +77,11 @@ |
77 | 77 | #include "llvm/Analysis/ScalarEvolution.h" |
78 | 78 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
79 | 79 | #include "llvm/Analysis/ScalarEvolutionNormalization.h" |
| 80 | +#include "llvm/Analysis/ScalarEvolutionPatternMatch.h" |
80 | 81 | #include "llvm/Analysis/TargetLibraryInfo.h" |
81 | 82 | #include "llvm/Analysis/TargetTransformInfo.h" |
82 | 83 | #include "llvm/Analysis/ValueTracking.h" |
83 | 84 | #include "llvm/BinaryFormat/Dwarf.h" |
84 | | -#include "llvm/Config/llvm-config.h" |
85 | 85 | #include "llvm/IR/BasicBlock.h" |
86 | 86 | #include "llvm/IR/Constant.h" |
87 | 87 | #include "llvm/IR/Constants.h" |
|
@@ -128,6 +128,7 @@ |
128 | 128 | #include <utility> |
129 | 129 |
|
130 | 130 | using namespace llvm; |
| 131 | +using namespace SCEVPatternMatch; |
131 | 132 |
|
132 | 133 | #define DEBUG_TYPE "loop-reduce" |
133 | 134 |
|
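A minimal sketch of the matcher idiom the hunks below adopt, assuming the m_scev_* helpers from the newly included ScalarEvolutionPatternMatch.h (helper name hypothetical):

    // Sketch only: the match/m_scev_AddRec idiom used throughout this patch.
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/Analysis/ScalarEvolutionExpressions.h"
    #include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
    using namespace llvm;
    using namespace SCEVPatternMatch;

    // Binds Start/Step iff S is an affine addrec {Start,+,Step}. The
    // two-operand m_scev_AddRec only matches affine recurrences, so the
    // explicit isAffine() checks removed below are subsumed by the matcher.
    static bool decomposeAffine(const SCEV *S, const SCEV *&Start,
                                const SCEV *&Step) {
      return match(S, m_scev_AddRec(m_SCEV(Start), m_SCEV(Step)));
    }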
@@ -556,16 +557,17 @@ static void DoInitialMatch(const SCEV *S, Loop *L, |
556 | 557 | } |
557 | 558 |
|
558 | 559 | // Look at addrec operands. |
559 | | - if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) |
560 | | - if (!AR->getStart()->isZero() && AR->isAffine()) { |
561 | | - DoInitialMatch(AR->getStart(), L, Good, Bad, SE); |
562 | | - DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), |
563 | | - AR->getStepRecurrence(SE), |
564 | | - // FIXME: AR->getNoWrapFlags() |
565 | | - AR->getLoop(), SCEV::FlagAnyWrap), |
566 | | - L, Good, Bad, SE); |
567 | | - return; |
568 | | - } |
| 560 | + const SCEV *Start, *Step; |
| 561 | + if (match(S, m_scev_AddRec(m_SCEV(Start), m_SCEV(Step))) && |
| 562 | + !Start->isZero()) { |
| 563 | + DoInitialMatch(Start, L, Good, Bad, SE); |
| 564 | + DoInitialMatch(SE.getAddRecExpr(SE.getConstant(S->getType(), 0), Step, |
| 565 | + // FIXME: AR->getNoWrapFlags() |
| 566 | + cast<SCEVAddRecExpr>(S)->getLoop(), |
| 567 | + SCEV::FlagAnyWrap), |
| 568 | + L, Good, Bad, SE); |
| 569 | + return; |
| 570 | + } |
569 | 571 |
|
570 | 572 | // Handle a multiplication by -1 (negation) if it didn't fold. |
571 | 573 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) |
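The dropped isAffine() guard is implied by the matcher, so only the !Start->isZero() check survives. A sketch of the zero-based recurrence this hunk rebuilds (same includes as the sketch above; helper name hypothetical):

    // Sketch: rebuild the zero-based recurrence {0,+,Step} exactly as the
    // hunk does; FlagAnyWrap is deliberately conservative (see the FIXME).
    static const SCEV *zeroBasedAddRec(const SCEV *S, const SCEV *Step,
                                       ScalarEvolution &SE) {
      return SE.getAddRecExpr(SE.getConstant(S->getType(), 0), Step,
                              cast<SCEVAddRecExpr>(S)->getLoop(),
                              SCEV::FlagAnyWrap);
    }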
@@ -1436,22 +1438,16 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg, |
1436 | 1438 | unsigned LoopCost = 1; |
1437 | 1439 | if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) || |
1438 | 1440 | TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) { |
1439 | | - |
1440 | | - // If the step size matches the base offset, we could use pre-indexed |
1441 | | - // addressing. |
1442 | | - if (AMK == TTI::AMK_PreIndexed && F.BaseOffset.isFixed()) { |
1443 | | - if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE))) |
1444 | | - if (Step->getAPInt() == F.BaseOffset.getFixedValue()) |
1445 | | - LoopCost = 0; |
1446 | | - } else if (AMK == TTI::AMK_PostIndexed) { |
1447 | | - const SCEV *LoopStep = AR->getStepRecurrence(*SE); |
1448 | | - if (isa<SCEVConstant>(LoopStep)) { |
1449 | | - const SCEV *LoopStart = AR->getStart(); |
1450 | | - if (!isa<SCEVConstant>(LoopStart) && |
1451 | | - SE->isLoopInvariant(LoopStart, L)) |
1452 | | - LoopCost = 0; |
1453 | | - } |
1454 | | - } |
| 1441 | + const SCEV *Start; |
| 1442 | + const SCEVConstant *Step; |
| 1443 | + if (match(AR, m_scev_AddRec(m_SCEV(Start), m_SCEVConstant(Step)))) |
| 1444 | + // If the step size matches the base offset, we could use pre-indexed |
| 1445 | + // addressing. |
| 1446 | + if ((AMK == TTI::AMK_PreIndexed && F.BaseOffset.isFixed() && |
| 1447 | + Step->getAPInt() == F.BaseOffset.getFixedValue()) || |
| 1448 | + (AMK == TTI::AMK_PostIndexed && !isa<SCEVConstant>(Start) && |
| 1449 | + SE->isLoopInvariant(Start, L))) |
| 1450 | + LoopCost = 0; |
1455 | 1451 | } |
1456 | 1452 | C.AddRecCost += LoopCost; |
1457 | 1453 |
|
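The two original branches fold into one disjunction, and the match now requires a constant step up front, which both arms already required individually (via dyn_cast and isa on the step). A sketch of the folded predicate (same includes plus TargetTransformInfo.h; the APInt parameter and helper name are hypothetical):

    // Sketch: the folded pre-/post-indexed predicate. FixedOffset stands in
    // for F.BaseOffset.getFixedValue(); the isFixed() guard is assumed done.
    static bool addRecCostIsFree(TargetTransformInfo::AddressingModeKind AMK,
                                 const APInt &FixedOffset,
                                 const SCEVConstant *Step, const SCEV *Start,
                                 ScalarEvolution &SE, const Loop *L) {
      // The arms are mutually exclusive since AMK holds a single value.
      return (AMK == TargetTransformInfo::AMK_PreIndexed &&
              Step->getAPInt() == FixedOffset) ||
             (AMK == TargetTransformInfo::AMK_PostIndexed &&
              !isa<SCEVConstant>(Start) && SE.isLoopInvariant(Start, L));
    }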
@@ -2544,13 +2540,10 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { |
2544 | 2540 | // Check the relevant induction variable for conformance to |
2545 | 2541 | // the pattern. |
2546 | 2542 | const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); |
2547 | | - const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); |
2548 | | - if (!AR || !AR->isAffine() || |
2549 | | - AR->getStart() != One || |
2550 | | - AR->getStepRecurrence(SE) != One) |
| 2543 | + if (!match(IV, m_scev_AddRec(m_scev_SpecificInt(1), m_scev_SpecificInt(1)))) |
2551 | 2544 | return Cond; |
2552 | 2545 |
|
2553 | | - assert(AR->getLoop() == L && |
| 2546 | + assert(cast<SCEVAddRecExpr>(IV)->getLoop() == L && |
2554 | 2547 | "Loop condition operand is an addrec in a different loop!"); |
2555 | 2548 |
|
2556 | 2549 | // Check the right operand of the select, and remember it, as it will |
@@ -3345,7 +3338,7 @@ void LSRInstance::CollectChains() { |
3345 | 3338 | void LSRInstance::FinalizeChain(IVChain &Chain) { |
3346 | 3339 | assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); |
3347 | 3340 | LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); |
3348 | | - |
| 3341 | + |
3349 | 3342 | for (const IVInc &Inc : Chain) { |
3350 | 3343 | LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n"); |
3351 | 3344 | auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand); |
@@ -3848,26 +3841,27 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, |
3848 | 3841 | Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); |
3849 | 3842 | } |
3850 | 3843 | return nullptr; |
3851 | | - } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { |
| 3844 | + } |
| 3845 | + const SCEV *Start, *Step; |
| 3846 | + if (match(S, m_scev_AddRec(m_SCEV(Start), m_SCEV(Step)))) { |
3852 | 3847 | // Split a non-zero base out of an addrec. |
3853 | | - if (AR->getStart()->isZero() || !AR->isAffine()) |
| 3848 | + if (Start->isZero()) |
3854 | 3849 | return S; |
3855 | 3850 |
|
3856 | | - const SCEV *Remainder = CollectSubexprs(AR->getStart(), |
3857 | | - C, Ops, L, SE, Depth+1); |
| 3851 | + const SCEV *Remainder = CollectSubexprs(Start, C, Ops, L, SE, Depth + 1); |
3858 | 3852 | // Split the non-zero AddRec unless it is part of a nested recurrence that |
3859 | 3853 | // does not pertain to this loop. |
3860 | | - if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) { |
| 3854 | + if (Remainder && (cast<SCEVAddRecExpr>(S)->getLoop() == L || |
| 3855 | + !isa<SCEVAddRecExpr>(Remainder))) { |
3861 | 3856 | Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); |
3862 | 3857 | Remainder = nullptr; |
3863 | 3858 | } |
3864 | | - if (Remainder != AR->getStart()) { |
| 3859 | + if (Remainder != Start) { |
3865 | 3860 | if (!Remainder) |
3866 | | - Remainder = SE.getConstant(AR->getType(), 0); |
3867 | | - return SE.getAddRecExpr(Remainder, |
3868 | | - AR->getStepRecurrence(SE), |
3869 | | - AR->getLoop(), |
3870 | | - //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) |
| 3861 | + Remainder = SE.getConstant(S->getType(), 0); |
| 3862 | + return SE.getAddRecExpr(Remainder, Step, |
| 3863 | + cast<SCEVAddRecExpr>(S)->getLoop(), |
| 3864 | + // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) |
3871 | 3865 | SCEV::FlagAnyWrap); |
3872 | 3866 | } |
3873 | 3867 | } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { |
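With the addrec case hoisted out of the else-if chain, the following `} else if (Mul...)` now chains off the new `if (match(...))`, so behavior is unchanged. A sketch of the rebuilt recurrence (same includes as the first sketch; helper name hypothetical):

    // Sketch: rebuild {NewStart,+,Step} over S's loop; a null Remainder
    // means everything was peeled off, so the start collapses to zero.
    static const SCEV *rebuildWithStart(const SCEV *S, const SCEV *Step,
                                        const SCEV *Remainder,
                                        ScalarEvolution &SE) {
      const SCEV *NewStart =
          Remainder ? Remainder : SE.getConstant(S->getType(), 0);
      return SE.getAddRecExpr(NewStart, Step,
                              cast<SCEVAddRecExpr>(S)->getLoop(),
                              SCEV::FlagAnyWrap);
    }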
@@ -3895,17 +3889,13 @@ static bool mayUsePostIncMode(const TargetTransformInfo &TTI, |
3895 | 3889 | if (LU.Kind != LSRUse::Address || |
3896 | 3890 | !LU.AccessTy.getType()->isIntOrIntVectorTy()) |
3897 | 3891 | return false; |
3898 | | - const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S); |
3899 | | - if (!AR) |
3900 | | - return false; |
3901 | | - const SCEV *LoopStep = AR->getStepRecurrence(SE); |
3902 | | - if (!isa<SCEVConstant>(LoopStep)) |
| 3892 | + const SCEV *Start; |
| 3893 | + if (!match(S, m_scev_AddRec(m_SCEV(Start), m_SCEVConstant()))) |
3903 | 3894 | return false; |
3904 | 3895 | // Check if a post-indexed load/store can be used. |
3905 | | - if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) || |
3906 | | - TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) { |
3907 | | - const SCEV *LoopStart = AR->getStart(); |
3908 | | - if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L)) |
| 3896 | + if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, S->getType()) || |
| 3897 | + TTI.isIndexedStoreLegal(TTI.MIM_PostInc, S->getType())) { |
| 3898 | + if (!isa<SCEVConstant>(Start) && SE.isLoopInvariant(Start, L)) |
3909 | 3899 | return true; |
3910 | 3900 | } |
3911 | 3901 | return false; |
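The argument-less m_SCEVConstant() only asserts the step's kind without binding it, and S->getType() equals the old AR->getType() for any matched addrec. The profitability shape on its own (sketch, same includes as the first sketch; helper name hypothetical):

    // Sketch: a loop-invariant, non-constant start plus a constant step is
    // the shape that can profit from post-indexed addressing.
    static bool postIncCandidate(const SCEV *S, ScalarEvolution &SE,
                                 const Loop *L) {
      const SCEV *Start;
      if (!match(S, m_scev_AddRec(m_SCEV(Start), m_SCEVConstant())))
        return false;
      return !isa<SCEVConstant>(Start) && SE.isLoopInvariant(Start, L);
    }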
@@ -4164,18 +4154,15 @@ void LSRInstance::GenerateConstantOffsetsImpl( |
4164 | 4154 | // base pointer for each iteration of the loop, resulting in no extra add/sub |
4165 | 4155 | // instructions for pointer updating. |
4166 | 4156 | if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) { |
4167 | | - if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) { |
4168 | | - if (auto *StepRec = |
4169 | | - dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) { |
4170 | | - const APInt &StepInt = StepRec->getAPInt(); |
4171 | | - int64_t Step = StepInt.isNegative() ? |
4172 | | - StepInt.getSExtValue() : StepInt.getZExtValue(); |
4173 | | - |
4174 | | - for (Immediate Offset : Worklist) { |
4175 | | - if (Offset.isFixed()) { |
4176 | | - Offset = Immediate::getFixed(Offset.getFixedValue() - Step); |
4177 | | - GenerateOffset(G, Offset); |
4178 | | - } |
| 4157 | + const APInt *StepInt; |
| 4158 | + if (match(G, m_scev_AddRec(m_SCEV(), m_scev_APInt(StepInt)))) { |
| 4159 | + int64_t Step = StepInt->isNegative() ? StepInt->getSExtValue() |
| 4160 | + : StepInt->getZExtValue(); |
| 4161 | + |
| 4162 | + for (Immediate Offset : Worklist) { |
| 4163 | + if (Offset.isFixed()) { |
| 4164 | + Offset = Immediate::getFixed(Offset.getFixedValue() - Step); |
| 4165 | + GenerateOffset(G, Offset); |
4179 | 4166 | } |
4180 | 4167 | } |
4181 | 4168 | } |
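m_scev_APInt binds the constant step's value directly and m_SCEV() discards the start; the sign-aware narrowing to int64_t is unchanged from the original. Extracted as a sketch (same includes as the first sketch; helper name hypothetical):

    // Sketch: pull a constant step out of G as a signed 64-bit value.
    static bool getConstantStep(const SCEV *G, int64_t &Step) {
      const APInt *StepInt;
      if (!match(G, m_scev_AddRec(m_SCEV(), m_scev_APInt(StepInt))))
        return false;
      Step = StepInt->isNegative() ? StepInt->getSExtValue()
                                   : StepInt->getZExtValue();
      return true;
    }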
@@ -6647,7 +6634,7 @@ struct SCEVDbgValueBuilder { |
6647 | 6634 | if (Op.getOp() != dwarf::DW_OP_LLVM_arg) { |
6648 | 6635 | Op.appendToVector(DestExpr); |
6649 | 6636 | continue; |
6650 | | - } |
| 6637 | + } |
6651 | 6638 |
|
6652 | 6639 | DestExpr.push_back(dwarf::DW_OP_LLVM_arg); |
6653 | 6640 | // `DW_OP_LLVM_arg n` represents the nth LocationOp in this SCEV, |
|