diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h index b05b8f349b8d5..ea5c42648660e 100644 --- a/llvm/include/llvm/Analysis/ValueTracking.h +++ b/llvm/include/llvm/Analysis/ValueTracking.h @@ -43,7 +43,7 @@ class StringRef; class TargetLibraryInfo; template <typename T> class ArrayRef; -constexpr unsigned MaxAnalysisRecursionDepth = 6; +constexpr int MaxAnalysisRecursionDepth = 6; /// Determine which bits of V are known to be either zero or one and return /// them in the KnownZero/KnownOne bit sets. @@ -58,7 +58,8 @@ LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, - bool UseInstrInfo = true, unsigned Depth = 0); + bool UseInstrInfo = true, + int Depth = MaxAnalysisRecursionDepth); /// Returns the known bits rather than passing by reference. LLVM_ABI KnownBits computeKnownBits(const Value *V, const DataLayout &DL, @@ -66,7 +67,7 @@ LLVM_ABI KnownBits computeKnownBits(const Value *V, const DataLayout &DL, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, bool UseInstrInfo = true, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Returns the known bits rather than passing by reference. LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, @@ -75,16 +76,18 @@ LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, bool UseInstrInfo = true, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth = 0); + const SimplifyQuery &Q, + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI KnownBits computeKnownBits(const Value *V, const SimplifyQuery &Q, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, - const SimplifyQuery &Q, unsigned Depth = 0); + const SimplifyQuery &Q, + int Depth = MaxAnalysisRecursionDepth); /// Compute known bits from the range metadata. /// \p KnownZero the set of bits that are known to be zero @@ -93,23 +96,22 @@ LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known); /// Merge bits known from context-dependent facts into Known. -LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, - const SimplifyQuery &Q, - unsigned Depth = 0); +LLVM_ABI void +computeKnownBitsFromContext(const Value *V, KnownBits &Known, + const SimplifyQuery &Q, + int Depth = MaxAnalysisRecursionDepth); /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or). -LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, - const KnownBits &KnownLHS, - const KnownBits &KnownRHS, - const SimplifyQuery &SQ, - unsigned Depth = 0); +LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr( + const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, + const SimplifyQuery &SQ, int Depth = MaxAnalysisRecursionDepth); /// Adjust \p Known for the given select \p Arm to include information from the /// select \p Cond.
-LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, - Value *Arm, bool Invert, - const SimplifyQuery &Q, - unsigned Depth = 0); +LLVM_ABI void +adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, + bool Invert, const SimplifyQuery &Q, + int Depth = MaxAnalysisRecursionDepth); /// Return true if LHS and RHS have no common bits set. LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache, @@ -127,11 +129,11 @@ LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, bool UseInstrInfo = true, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, const SimplifyQuery &Q, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI); @@ -144,7 +146,7 @@ LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI); /// pointer couldn't possibly be null at the specified instruction. /// Supports values with integer or pointer type and vectors of integers. LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return true if the two given values are negations of each other. /// Currently can recognize Value pair: @@ -162,22 +164,23 @@ LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y); /// Returns true if the given value is known to be non-negative. LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Returns true if the given value is known to be positive (i.e. non-negative /// and non-zero). LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Returns true if the given value is known to be negative (i.e. non-positive /// and non-zero). LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return true if the given values are known to be non-equal when defined. /// Supports scalar integer types only. LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, - const SimplifyQuery &SQ, unsigned Depth = 0); + const SimplifyQuery &SQ, + int Depth = MaxAnalysisRecursionDepth); /// Return true if 'V & Mask' is known to be zero. We use this predicate to /// simplify operations downstream. Mask is known to be zero for bits that V @@ -189,7 +192,8 @@ LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, - const SimplifyQuery &SQ, unsigned Depth = 0); + const SimplifyQuery &SQ, + int Depth = MaxAnalysisRecursionDepth); /// Return the number of times the sign bit of the register is replicated into /// the other bits. We know that at least 1 bit is always equal to the sign @@ -203,17 +207,15 @@ LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, bool UseInstrInfo = true, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Get the upper bound on bit size for this Value \p Op as a signed integer. /// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function. -LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, - const DataLayout &DL, - AssumptionCache *AC = nullptr, - const Instruction *CxtI = nullptr, - const DominatorTree *DT = nullptr, - unsigned Depth = 0); +LLVM_ABI unsigned ComputeMaxSignificantBits( + const Value *Op, const DataLayout &DL, AssumptionCache *AC = nullptr, + const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, + int Depth = MaxAnalysisRecursionDepth); /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent /// intrinsics are treated as-if they were intrinsics. @@ -236,39 +238,36 @@ LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, /// point classes should be queried. Queries not specified in \p /// InterestedClasses should be reliable if they are determined during the /// query. -LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, - const APInt &DemandedElts, - FPClassTest InterestedClasses, - const SimplifyQuery &SQ, - unsigned Depth = 0); +LLVM_ABI KnownFPClass computeKnownFPClass( + const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, + const SimplifyQuery &SQ, int Depth = MaxAnalysisRecursionDepth); -LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, - FPClassTest InterestedClasses, - const SimplifyQuery &SQ, - unsigned Depth = 0); +LLVM_ABI KnownFPClass computeKnownFPClass( + const Value *V, FPClassTest InterestedClasses, const SimplifyQuery &SQ, + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI KnownFPClass computeKnownFPClass( const Value *V, const DataLayout &DL, FPClassTest InterestedClasses = fcAllFlags, const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr, - bool UseInstrInfo = true, unsigned Depth = 0); + bool UseInstrInfo = true, int Depth = MaxAnalysisRecursionDepth); /// Wrapper to account for known fast math flags at the use instruction. LLVM_ABI KnownFPClass computeKnownFPClass( const Value *V, const APInt &DemandedElts, FastMathFlags FMF, - FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth = 0); + FPClassTest InterestedClasses, const SimplifyQuery &SQ, + int Depth = MaxAnalysisRecursionDepth); -LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF, - FPClassTest InterestedClasses, - const SimplifyQuery &SQ, - unsigned Depth = 0); +LLVM_ABI KnownFPClass computeKnownFPClass( + const Value *V, FastMathFlags FMF, FPClassTest InterestedClasses, + const SimplifyQuery &SQ, int Depth = MaxAnalysisRecursionDepth); /// Return true if we can prove that the specified FP value is never equal to /// -0.0. Users should use caution when considering PreserveSign /// denormal-fp-math. LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return true if we can prove that the specified FP value is either NaN or /// never less than -0.0. @@ -278,32 +277,32 @@ LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, /// -0 --> true /// x > +0 --> true /// x < -0 --> false -LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, - const SimplifyQuery &SQ, - unsigned Depth = 0); +LLVM_ABI bool +cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, + int Depth = MaxAnalysisRecursionDepth); /// Return true if the floating-point scalar value is not an infinity or if /// the floating-point vector value has no infinities. 
Return false if a value /// could ever be infinity. LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return true if the floating-point value can never contain a NaN or infinity. LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return true if the floating-point scalar value is not a NaN or if the /// floating-point vector value has no NaN elements. Return false if a value /// could ever be NaN. LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return false if we can prove that the specified FP value's sign bit is 0. /// Return true if we can prove that the specified FP value's sign bit is 1. /// Otherwise return std::nullopt. -LLVM_ABI std::optional<bool> computeKnownFPSignBit(const Value *V, - const SimplifyQuery &SQ, - unsigned Depth = 0); +LLVM_ABI std::optional<bool> +computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, + int Depth = MaxAnalysisRecursionDepth); /// Return true if the sign bit of the FP value can be ignored by the user when /// the value is zero. @@ -658,12 +657,10 @@ LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth); /// Determine the possible constant range of an integer or vector of integer /// value. This is intended as a cheap, non-recursive check. -LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, - bool UseInstrInfo = true, - AssumptionCache *AC = nullptr, - const Instruction *CtxI = nullptr, - const DominatorTree *DT = nullptr, - unsigned Depth = 0); +LLVM_ABI ConstantRange computeConstantRange( + const Value *V, bool ForSigned, bool UseInstrInfo = true, + AssumptionCache *AC = nullptr, const Instruction *CtxI = nullptr, + const DominatorTree *DT = nullptr, int Depth = MaxAnalysisRecursionDepth); /// Combine constant ranges from computeConstantRange() and computeKnownBits(). LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits( @@ -777,19 +774,19 @@ LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC = nullptr, const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Returns true if V cannot be poison, but may be undef. LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr, const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC, BasicBlock::iterator CtxI, const DominatorTree *DT = nullptr, - unsigned Depth = 0) { + int Depth = MaxAnalysisRecursionDepth) { // Takes an iterator as a position, passes down to Instruction * // implementation. return isGuaranteedNotToBePoison(V, AC, &*CtxI, DT, Depth); } @@ -800,7 +797,7 @@ LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr, const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Return true if undefined behavior would provably be executed on the path to /// OnPathTo if Root produced a poison result.
Note that this doesn't say @@ -876,9 +873,9 @@ struct SelectPatternResult { /// /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt /// -LLVM_ABI SelectPatternResult -matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, - Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0); +LLVM_ABI SelectPatternResult matchSelectPattern( + Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp = nullptr, + int Depth = MaxAnalysisRecursionDepth); inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) { @@ -895,7 +892,7 @@ inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS, LLVM_ABI SelectPatternResult matchDecomposedSelectPattern( CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF = FastMathFlags(), Instruction::CastOps *CastOp = nullptr, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Determine the pattern for predicate `X Pred Y ? X : Y`. LLVM_ABI SelectPatternResult getSelectPattern( @@ -972,11 +969,13 @@ LLVM_ABI bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, /// (A) LLVM_ABI std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, - bool LHSIsTrue = true, unsigned Depth = 0); + bool LHSIsTrue = true, + int Depth = MaxAnalysisRecursionDepth); LLVM_ABI std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, - bool LHSIsTrue = true, unsigned Depth = 0); + bool LHSIsTrue = true, + int Depth = MaxAnalysisRecursionDepth); /// Return the boolean condition value in the context of the given instruction /// if it is known based on dominating conditions. diff --git a/llvm/include/llvm/Analysis/WithCache.h b/llvm/include/llvm/Analysis/WithCache.h index 3bf35a889bbf2..8862e338c054f 100644 --- a/llvm/include/llvm/Analysis/WithCache.h +++ b/llvm/include/llvm/Analysis/WithCache.h @@ -45,7 +45,7 @@ template <typename Arg> class WithCache { mutable KnownBits Known; void calculateKnownBits(const SimplifyQuery &Q) const { - Known = computeKnownBits(Pointer.getPointer(), Q, 0); + Known = computeKnownBits(Pointer.getPointer(), Q); Pointer.setInt(true); } diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h index ae3145677817d..0a7ce9702a270 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h @@ -17,6 +17,7 @@ #include "GISelWorkList.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/CodeGen/Register.h" #include "llvm/CodeGenTypes/LowLevelType.h" #include "llvm/IR/DebugLoc.h" @@ -599,19 +600,19 @@ LLVM_ABI bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata = true); /// Returns true if \p Reg cannot be poison or undef. -LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(Register Reg, - const MachineRegisterInfo &MRI, - unsigned Depth = 0); +LLVM_ABI bool +isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, + int Depth = MaxAnalysisRecursionDepth); /// Returns true if \p Reg cannot be poison, but may be undef. LLVM_ABI bool isGuaranteedNotToBePoison(Register Reg, const MachineRegisterInfo &MRI, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Returns true if \p Reg cannot be undef, but may be poison.
LLVM_ABI bool isGuaranteedNotToBeUndef(Register Reg, const MachineRegisterInfo &MRI, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Get the type back from LLT. It won't be 100 percent accurate but returns an /// estimate of the type. diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h index fa313f5290773..2458fb20fd254 100644 --- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h +++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h @@ -203,14 +203,14 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { /// If the inversion will consume instructions, `DoesConsume` will be set to /// true. Otherwise it will be false. Value *getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, - BuilderTy *Builder, bool &DoesConsume, - unsigned Depth); + BuilderTy *Builder, bool &DoesConsume, + int Depth); Value *getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume) { DoesConsume = false; return getFreelyInvertedImpl(V, WillInvertAllUses, Builder, DoesConsume, - /*Depth*/ 0); + /*Depth*/ MaxAnalysisRecursionDepth); } Value *getFreelyInverted(Value *V, bool WillInvertAllUses, @@ -431,37 +431,38 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { virtual Instruction *eraseInstFromFunction(Instruction &I) = 0; void computeKnownBits(const Value *V, KnownBits &Known, - const Instruction *CxtI, unsigned Depth = 0) const { + const Instruction *CxtI, + int Depth = MaxAnalysisRecursionDepth) const { llvm::computeKnownBits(V, Known, SQ.getWithInstruction(CxtI), Depth); } KnownBits computeKnownBits(const Value *V, const Instruction *CxtI, - unsigned Depth = 0) const { + int Depth = MaxAnalysisRecursionDepth) const { return llvm::computeKnownBits(V, SQ.getWithInstruction(CxtI), Depth); } bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false, const Instruction *CxtI = nullptr, - unsigned Depth = 0) { + int Depth = MaxAnalysisRecursionDepth) { return llvm::isKnownToBeAPowerOfTwo(V, OrZero, SQ.getWithInstruction(CxtI), Depth); } bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI = nullptr, - unsigned Depth = 0) const { + int Depth = MaxAnalysisRecursionDepth) const { return llvm::MaskedValueIsZero(V, Mask, SQ.getWithInstruction(CxtI), Depth); } unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI = nullptr, - unsigned Depth = 0) const { + int Depth = MaxAnalysisRecursionDepth) const { return llvm::ComputeNumSignBits(Op, DL, &AC, CxtI, &DT, Depth); } - unsigned ComputeMaxSignificantBits(const Value *Op, - const Instruction *CxtI = nullptr, - unsigned Depth = 0) const { + unsigned + ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI = nullptr, + int Depth = MaxAnalysisRecursionDepth) const { return llvm::ComputeMaxSignificantBits(Op, DL, &AC, CxtI, &DT, Depth); } @@ -511,7 +512,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, - unsigned Depth = 0) = 0; + int Depth = MaxAnalysisRecursionDepth) = 0; bool SimplifyDemandedBits(Instruction *I, unsigned OpNo, const APInt &DemandedMask, KnownBits &Known) { @@ -521,7 +522,6 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { virtual Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts, - unsigned Depth = 0, bool AllowMultipleUsers = false) = 0; bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const; diff 
--git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 0a460786d00ea..a9c0b4f0e572b 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -90,7 +90,6 @@ using namespace llvm::PatternMatch; static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20)); - /// Returns the bitwidth of the given scalar or pointer type. For vector types, /// returns the element type's bitwidth. static unsigned getBitWidth(Type *Ty, const DataLayout &DL) { @@ -133,10 +132,10 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf, static void computeKnownBits(const Value *V, const APInt &DemandedElts, KnownBits &Known, const SimplifyQuery &Q, - unsigned Depth); + int Depth); void llvm::computeKnownBits(const Value *V, KnownBits &Known, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { // Since the number of lanes in a scalable vector is unknown at compile time, // we track one bit which is implicitly broadcast to all lanes. This means // that all lanes in a scalable vector are considered demanded. @@ -149,7 +148,7 @@ void llvm::computeKnownBits(const Value *V, KnownBits &Known, void llvm::computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, - bool UseInstrInfo, unsigned Depth) { + bool UseInstrInfo, int Depth) { computeKnownBits(V, Known, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth); @@ -158,7 +157,7 @@ void llvm::computeKnownBits(const Value *V, KnownBits &Known, KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo, - unsigned Depth) { + int Depth) { return computeKnownBits( V, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth); } @@ -167,7 +166,7 @@ KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo, - unsigned Depth) { + int Depth) { return computeKnownBits( V, DemandedElts, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth); @@ -267,22 +266,21 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo, - unsigned Depth) { + int Depth) { return ::isKnownToBeAPowerOfTwo( V, OrZero, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth); } static bool isKnownNonZero(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth); + const SimplifyQuery &Q, int Depth); bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { return computeKnownBits(V, SQ, Depth).isNonNegative(); } -bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { +bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ, int Depth) { if (auto *CI = dyn_cast<ConstantInt>(V)) return CI->getValue().isStrictlyPositive(); @@ -293,17 +291,16 @@ bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ, (Known.isNonZero() || isKnownNonZero(V, SQ, Depth)); } -bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { +bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ, int Depth) { return computeKnownBits(V, SQ, Depth).isNegative(); } static bool
isKnownNonEqual(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth); + int Depth); bool llvm::isKnownNonEqual(const Value *V1, const Value *V2, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { // We don't support looking through casts. if (V1 == V2 || V1->getType() != V2->getType()) return false; @@ -314,17 +311,17 @@ bool llvm::isKnownNonEqual(const Value *V1, const Value *V2, } bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask, - const SimplifyQuery &SQ, unsigned Depth) { + const SimplifyQuery &SQ, int Depth) { KnownBits Known(Mask.getBitWidth()); computeKnownBits(V, Known, SQ, Depth); return Mask.isSubsetOf(Known.Zero); } static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth); + const SimplifyQuery &Q, int Depth); static unsigned ComputeNumSignBits(const Value *V, const SimplifyQuery &Q, - unsigned Depth = 0) { + int Depth = MaxAnalysisRecursionDepth) { auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); APInt DemandedElts = FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); @@ -334,7 +331,7 @@ static unsigned ComputeNumSignBits(const Value *V, const SimplifyQuery &Q, unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo, - unsigned Depth) { + int Depth) { return ::ComputeNumSignBits( V, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth); } @@ -342,8 +339,7 @@ unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL, unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, - const DominatorTree *DT, - unsigned Depth) { + const DominatorTree *DT, int Depth) { unsigned SignBits = ComputeNumSignBits(V, DL, AC, CxtI, DT, Depth); return V->getType()->getScalarSizeInBits() - SignBits + 1; } @@ -352,24 +348,24 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, - const SimplifyQuery &Q, unsigned Depth) { - computeKnownBits(Op1, DemandedElts, KnownOut, Q, Depth + 1); + const SimplifyQuery &Q, int Depth) { + computeKnownBits(Op1, DemandedElts, KnownOut, Q, Depth - 1); // If one operand is unknown and we have no nowrap information, // the result will be unknown independently of the second operand.
if (KnownOut.isUnknown() && !NSW && !NUW) return; - computeKnownBits(Op0, DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(Op0, DemandedElts, Known2, Q, Depth - 1); KnownOut = KnownBits::computeForAddSub(Add, NSW, NUW, Known2, KnownOut); } static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, - const SimplifyQuery &Q, unsigned Depth) { - computeKnownBits(Op1, DemandedElts, Known, Q, Depth + 1); - computeKnownBits(Op0, DemandedElts, Known2, Q, Depth + 1); + const SimplifyQuery &Q, int Depth) { + computeKnownBits(Op1, DemandedElts, Known, Q, Depth - 1); + computeKnownBits(Op0, DemandedElts, Known2, Q, Depth - 1); bool isKnownNegative = false; bool isKnownNonNegative = false; @@ -406,7 +402,7 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, bool SelfMultiply = Op0 == Op1; if (SelfMultiply) SelfMultiply &= - isGuaranteedNotToBeUndef(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1); + isGuaranteedNotToBeUndef(Op0, Q.AC, Q.CxtI, Q.DT, Depth - 1); Known = KnownBits::mul(Known, Known2, SelfMultiply); // Only make use of no-wrap flags if we failed to compute the sign bit @@ -796,14 +792,13 @@ static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, const SimplifyQuery &SQ, - bool Invert, unsigned Depth) { + bool Invert, int Depth) { Value *A, *B; - if (Depth < MaxAnalysisRecursionDepth && - match(Cond, m_LogicalOp(m_Value(A), m_Value(B)))) { + if (Depth && match(Cond, m_LogicalOp(m_Value(A), m_Value(B)))) { KnownBits Known2(Known.getBitWidth()); KnownBits Known3(Known.getBitWidth()); - computeKnownBitsFromCond(V, A, Known2, SQ, Invert, Depth + 1); - computeKnownBitsFromCond(V, B, Known3, SQ, Invert, Depth + 1); + computeKnownBitsFromCond(V, A, Known2, SQ, Invert, Depth - 1); + computeKnownBitsFromCond(V, B, Known3, SQ, Invert, Depth - 1); if (Invert ? match(Cond, m_LogicalOr(m_Value(), m_Value())) : match(Cond, m_LogicalAnd(m_Value(), m_Value()))) Known2 = Known2.unionWith(Known3); @@ -833,12 +828,12 @@ static void computeKnownBitsFromCond(const Value *V, Value *Cond, return; } - if (Depth < MaxAnalysisRecursionDepth && match(Cond, m_Not(m_Value(A)))) - computeKnownBitsFromCond(V, A, Known, SQ, !Invert, Depth + 1); + if (Depth && match(Cond, m_Not(m_Value(A)))) + computeKnownBitsFromCond(V, A, Known, SQ, !Invert, Depth - 1); } void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { // Handle injected condition. if (Q.CC && Q.CC->AffectedValues.contains(V)) computeKnownBitsFromCond(V, Q.CC->Cond, Known, Q, Q.CC->Invert, Depth); @@ -927,7 +922,7 @@ void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known, } // The remaining tests are all recursive, so bail out if we hit the limit. - if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) continue; ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg); @@ -956,23 +951,24 @@ void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known, /// combined for all permitted shift amounts.
static void computeKnownBitsFromShiftOperator( const Operator *I, const APInt &DemandedElts, KnownBits &Known, - KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth, + KnownBits &Known2, const SimplifyQuery &Q, int Depth, function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) { - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth - 1); // To limit compile-time impact, only query isKnownNonZero() if we know at // least something about the shift amount. bool ShAmtNonZero = Known.isNonZero() || (Known.getMaxValue().ult(Known.getBitWidth()) && - isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth + 1)); + isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth - 1)); Known = KF(Known2, Known, ShAmtNonZero); } -static KnownBits -getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, - const KnownBits &KnownLHS, const KnownBits &KnownRHS, - const SimplifyQuery &Q, unsigned Depth) { +static KnownBits getKnownBitsFromAndXorOr(const Operator *I, + const APInt &DemandedElts, + const KnownBits &KnownLHS, + const KnownBits &KnownRHS, + const SimplifyQuery &Q, int Depth) { unsigned BitWidth = KnownLHS.getBitWidth(); KnownBits KnownOut(BitWidth); bool IsAnd = false; @@ -1029,7 +1025,7 @@ getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) || match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) { KnownBits KnownY(BitWidth); - computeKnownBits(Y, DemandedElts, KnownY, Q, Depth + 1); + computeKnownBits(Y, DemandedElts, KnownY, Q, Depth - 1); if (KnownY.countMinTrailingOnes() > 0) { if (IsAnd) KnownOut.Zero.setBit(0); @@ -1042,7 +1038,7 @@ getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, static KnownBits computeKnownBitsForHorizontalOperation( const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth, + int Depth, const function_ref<KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc) { APInt DemandedEltsLHS, DemandedEltsRHS; @@ -1053,8 +1049,8 @@ static KnownBits computeKnownBitsForHorizontalOperation( const auto ComputeForSingleOpFunc = [Depth, &Q, KnownBitsFunc](const Value *Op, APInt &DemandedEltsOp) { return KnownBitsFunc( - computeKnownBits(Op, DemandedEltsOp, Q, Depth + 1), - computeKnownBits(Op, DemandedEltsOp << 1, Q, Depth + 1)); + computeKnownBits(Op, DemandedEltsOp, Q, Depth - 1), + computeKnownBits(Op, DemandedEltsOp << 1, Q, Depth - 1)); }; if (DemandedEltsRHS.isZero()) @@ -1071,7 +1067,7 @@ KnownBits llvm::analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { auto *FVTy = dyn_cast<FixedVectorType>(I->getType()); APInt DemandedElts = FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); @@ -1101,14 +1097,14 @@ ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) { void llvm::adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { // If we have a constant arm, we are done. if (Known.isConstant()) return; // See what condition implies about the bits of the select arm.
KnownBits CondRes(Known.getBitWidth()); - computeKnownBitsFromCond(Arm, Cond, CondRes, Q, Invert, Depth + 1); + computeKnownBitsFromCond(Arm, Cond, CondRes, Q, Invert, Depth - 1); // If we don't get any information from the condition, no reason to // proceed. if (CondRes.isUnknown()) @@ -1125,7 +1121,7 @@ void llvm::adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, // Finally make sure the information we found is valid. This is relatively // expensive so it's left for the very end. - if (!isGuaranteedNotToBeUndef(Arm, Q.AC, Q.CxtI, Q.DT, Depth + 1)) + if (!isGuaranteedNotToBeUndef(Arm, Q.AC, Q.CxtI, Q.DT, Depth - 1)) return; // Finally, we know we get information from the condition and it's valid, @@ -1194,8 +1190,7 @@ static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II, static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, - const SimplifyQuery &Q, - unsigned Depth) { + const SimplifyQuery &Q, int Depth) { unsigned BitWidth = Known.getBitWidth(); KnownBits Known2(BitWidth); @@ -1207,20 +1202,20 @@ static void computeKnownBitsFromOperator(const Operator *I, computeKnownBitsFromRangeMetadata(*MD, Known); break; case Instruction::And: - computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth); break; case Instruction::Or: - computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth); break; case Instruction::Xor: - computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth); break; @@ -1232,15 +1227,15 @@ static void computeKnownBitsFromOperator(const Operator *I, break; } case Instruction::UDiv: { - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I))); break; } case Instruction::SDiv: { - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I))); break; } case Instruction::Select: { auto ComputeForArm = [&](Value *Arm, bool Invert) { KnownBits Res(Known.getBitWidth()); - computeKnownBits(Arm, DemandedElts, Res, Q, Depth + 1); +
computeKnownBits(Arm, DemandedElts, Res, Q, Depth - 1); adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Q, Depth); return Res; }; @@ -1283,7 +1278,7 @@ static void computeKnownBitsFromOperator(const Operator *I, assert(SrcBitWidth && "SrcBitWidth can't be zero"); Known = Known.anyextOrTrunc(SrcBitWidth); - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); if (auto *Inst = dyn_cast<PossiblyNonNegInst>(I); Inst && Inst->hasNonNeg() && !Known.isNegative()) Known.makeNonNegative(); @@ -1296,7 +1291,7 @@ static void computeKnownBitsFromOperator(const Operator *I, // TODO: For now, not handling conversions like: // (bitcast i64 %x to <2 x i32>) !I->getType()->isVectorTy()) { - computeKnownBits(I->getOperand(0), Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), Known, Q, Depth - 1); break; } @@ -1306,7 +1301,7 @@ static void computeKnownBitsFromOperator(const Operator *I, V->getType()->isFPOrFPVectorTy()) { Type *FPType = V->getType()->getScalarType(); KnownFPClass Result = - computeKnownFPClass(V, DemandedElts, fcAllFlags, Q, Depth + 1); + computeKnownFPClass(V, DemandedElts, fcAllFlags, Q, Depth - 1); FPClassTest FPClasses = Result.KnownFPClasses; // TODO: Treat it as zero/poison if the use of I is unreachable. @@ -1375,7 +1370,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits KnownSrc(SubBitWidth); for (unsigned i = 0; i != SubScale; ++i) { computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc, Q, - Depth + 1); + Depth - 1); unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i; Known.insertBits(KnownSrc, ShiftElt * SubBitWidth); } @@ -1387,7 +1382,7 @@ static void computeKnownBitsFromOperator(const Operator *I, unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); Known = Known.trunc(SrcBitWidth); - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); // If the sign bit of the input is known set or clear, then we know the // top bits of the result. Known = Known.sext(BitWidth); @@ -1447,14 +1442,14 @@ static void computeKnownBitsFromOperator(const Operator *I, break; } case Instruction::SRem: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::srem(Known, Known2); break; case Instruction::URem: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::urem(Known, Known2); break; case Instruction::Alloca: @@ -1463,7 +1458,7 @@ static void computeKnownBitsFromOperator(const Operator *I, case Instruction::GetElementPtr: { // Analyze all of the subscripts of this getelementptr instruction // to determine if we can prove known low zero bits. - computeKnownBits(I->getOperand(0), Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), Known, Q, Depth - 1); // Accumulate the constant indices in a separate variable // to minimize the number of calls to computeForAddSub.
unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType()); @@ -1531,7 +1526,7 @@ static void computeKnownBitsFromOperator(const Operator *I, } KnownBits IndexBits = - computeKnownBits(Index, Q, Depth + 1).sextOrTrunc(IndexWidth); + computeKnownBits(Index, Q, Depth - 1).sextOrTrunc(IndexWidth); KnownBits ScalingFactor(IndexWidth); // Multiply by current sizeof type. // &A[i] == A + i * sizeof(*A[i]). @@ -1586,7 +1581,7 @@ static void computeKnownBitsFromOperator(const Operator *I, // add sufficient tests to cover. SimplifyQuery RecQ = Q.getWithoutCondContext(); RecQ.CxtI = P; - computeKnownBits(R, DemandedElts, Known2, RecQ, Depth + 1); + computeKnownBits(R, DemandedElts, Known2, RecQ, Depth - 1); switch (Opcode) { case Instruction::Shl: // A shl recurrence will only increase the trailing zeros @@ -1629,12 +1624,12 @@ static void computeKnownBitsFromOperator(const Operator *I, // Ok, we have a PHI of the form L op= R. Check for low // zero bits. RecQ.CxtI = RInst; - computeKnownBits(R, DemandedElts, Known2, RecQ, Depth + 1); + computeKnownBits(R, DemandedElts, Known2, RecQ, Depth - 1); // We need to take the minimum number of known bits KnownBits Known3(BitWidth); RecQ.CxtI = LInst; - computeKnownBits(L, DemandedElts, Known3, RecQ, Depth + 1); + computeKnownBits(L, DemandedElts, Known3, RecQ, Depth - 1); Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(), Known3.countMinTrailingZeros())); @@ -1696,7 +1691,7 @@ static void computeKnownBitsFromOperator(const Operator *I, // Otherwise take the unions of the known bit sets of the operands, // taking conservative care to avoid excessive recursion. - if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) { + if (Depth > 1 && Known.isUnknown()) { // Skip if every incoming value refers back to the PHI itself. if (isa_and_nonnull<UndefValue>(P->hasConstantValue())) break; @@ -1724,8 +1719,7 @@ static void computeKnownBitsFromOperator(const Operator *I, // want to waste time spinning around in loops. // TODO: See if we can base recursion limiter on number of incoming phi // edges so we don't overly clamp analysis. - computeKnownBits(IncValue, DemandedElts, Known2, RecQ, - MaxAnalysisRecursionDepth - 1); + computeKnownBits(IncValue, DemandedElts, Known2, RecQ, 1); // See if we can further use a conditional branch into the phi // to help us determine the range of the value. @@ -1785,7 +1779,7 @@ static void computeKnownBitsFromOperator(const Operator *I, if (const Value *RV = CB->getReturnedArgOperand()) { if (RV->getType() == I->getType()) { - computeKnownBits(RV, Known2, Q, Depth + 1); + computeKnownBits(RV, Known2, Q, Depth - 1); Known = Known.unionWith(Known2); // If the function doesn't return properly for all input values // (e.g.
unreachable exits) then there might be conflicts between the @@ -1800,23 +1794,23 @@ static void computeKnownBitsFromOperator(const Operator *I, default: break; case Intrinsic::abs: { - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); bool IntMinIsPoison = match(II->getArgOperand(1), m_One()); Known = Known2.abs(IntMinIsPoison); break; } case Intrinsic::bitreverse: - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); Known.Zero |= Known2.Zero.reverseBits(); Known.One |= Known2.One.reverseBits(); break; case Intrinsic::bswap: - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); Known.Zero |= Known2.Zero.byteSwap(); Known.One |= Known2.One.byteSwap(); break; case Intrinsic::ctlz: { - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); // If we have a known 1, its position is our upper bound. unsigned PossibleLZ = Known2.countMaxLeadingZeros(); // If this call is poison for 0 input, the result will be less than 2^n. @@ -1827,7 +1821,7 @@ static void computeKnownBitsFromOperator(const Operator *I, break; } case Intrinsic::cttz: { - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); // If we have a known 1, its position is our upper bound. unsigned PossibleTZ = Known2.countMaxTrailingZeros(); // If this call is poison for 0 input, the result will be less than 2^n. @@ -1838,7 +1832,7 @@ static void computeKnownBitsFromOperator(const Operator *I, break; } case Intrinsic::ctpop: { - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); // We can bound the space the count needs. Also, bits known to be zero // can't contribute to the population. 
unsigned BitsPossiblySet = Known2.countMaxPopulation(); @@ -1860,8 +1854,8 @@ static void computeKnownBitsFromOperator(const Operator *I, ShiftAmt = BitWidth - ShiftAmt; KnownBits Known3(BitWidth); - computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known3, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known3, Q, Depth - 1); Known.Zero = Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); @@ -1870,29 +1864,29 @@ static void computeKnownBitsFromOperator(const Operator *I, break; } case Intrinsic::uadd_sat: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::uadd_sat(Known, Known2); break; case Intrinsic::usub_sat: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::usub_sat(Known, Known2); break; case Intrinsic::sadd_sat: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::sadd_sat(Known, Known2); break; case Intrinsic::ssub_sat: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::ssub_sat(Known, Known2); break; // Vec reverse preserves bits from input vec. case Intrinsic::vector_reverse: computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known, Q, - Depth + 1); + Depth - 1); break; // for min/max/and/or reduce, any bit common to each element in the // input vec is set in the output. @@ -1902,10 +1896,10 @@ static void computeKnownBitsFromOperator(const Operator *I, case Intrinsic::vector_reduce_umin: case Intrinsic::vector_reduce_smax: case Intrinsic::vector_reduce_smin: - computeKnownBits(I->getOperand(0), Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), Known, Q, Depth - 1); break; case Intrinsic::vector_reduce_xor: { - computeKnownBits(I->getOperand(0), Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), Known, Q, Depth - 1); // The zeros common to all vecs are zero in the output. // If the number of elements is odd, then the common ones remain. If the // number of elements is even, then the common ones becomes zeros. 
@@ -1920,33 +1914,33 @@ static void computeKnownBitsFromOperator(const Operator *I, break; } case Intrinsic::umin: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::umin(Known, Known2); break; case Intrinsic::umax: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::umax(Known, Known2); break; case Intrinsic::smin: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::smin(Known, Known2); unionWithMinMaxIntrinsicClamp(II, Known); break; case Intrinsic::smax: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::smax(Known, Known2); unionWithMinMaxIntrinsicClamp(II, Known); break; case Intrinsic::ptrmask: { - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); const Value *Mask = I->getOperand(1); Known2 = KnownBits(Mask->getType()->getScalarSizeInBits()); - computeKnownBits(Mask, DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(Mask, DemandedElts, Known2, Q, Depth - 1); // TODO: 1-extend would be more precise. Known &= Known2.anyextOrTrunc(BitWidth); break; @@ -1954,15 +1948,15 @@ static void computeKnownBitsFromOperator(const Operator *I, case Intrinsic::x86_sse2_pmulh_w: case Intrinsic::x86_avx2_pmulh_w: case Intrinsic::x86_avx512_pmulh_w_512: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::mulhs(Known, Known2); break; case Intrinsic::x86_sse2_pmulhu_w: case Intrinsic::x86_avx2_pmulhu_w: case Intrinsic::x86_avx512_pmulhu_w_512: - computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1); - computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1); + computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth - 1); + computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth - 1); Known = KnownBits::mulhu(Known, Known2); break; case Intrinsic::x86_sse42_crc32_64_64: @@ -2053,14 +2047,14 @@ static void computeKnownBitsFromOperator(const Operator *I, Known.Zero.setAllBits(); if (!!DemandedLHS) { const Value *LHS = Shuf->getOperand(0); - computeKnownBits(LHS, DemandedLHS, Known, Q, Depth + 1); + computeKnownBits(LHS, DemandedLHS, Known, Q, Depth - 1); // If we don't know any bits, early out. 
if (Known.isUnknown()) break; } if (!!DemandedRHS) { const Value *RHS = Shuf->getOperand(1); - computeKnownBits(RHS, DemandedRHS, Known2, Q, Depth + 1); + computeKnownBits(RHS, DemandedRHS, Known2, Q, Depth - 1); Known = Known.intersectWith(Known2); } break; @@ -2085,14 +2079,14 @@ static void computeKnownBitsFromOperator(const Operator *I, Known.One.setAllBits(); Known.Zero.setAllBits(); if (NeedsElt) { - computeKnownBits(Elt, Known, Q, Depth + 1); + computeKnownBits(Elt, Known, Q, Depth - 1); // If we don't know any bits, early out. if (Known.isUnknown()) break; } if (!DemandedVecElts.isZero()) { - computeKnownBits(Vec, DemandedVecElts, Known2, Q, Depth + 1); + computeKnownBits(Vec, DemandedVecElts, Known2, Q, Depth - 1); Known = Known.intersectWith(Known2); } break; @@ -2112,7 +2106,7 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt DemandedVecElts = APInt::getAllOnes(NumElts); if (CIdx && CIdx->getValue().ult(NumElts)) DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); - computeKnownBits(Vec, DemandedVecElts, Known, Q, Depth + 1); + computeKnownBits(Vec, DemandedVecElts, Known, Q, Depth - 1); break; } case Instruction::ExtractValue: @@ -2145,8 +2139,8 @@ static void computeKnownBitsFromOperator(const Operator *I, break; case Instruction::Freeze: if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, - Depth + 1)) - computeKnownBits(I->getOperand(0), Known, Q, Depth + 1); + Depth - 1)) + computeKnownBits(I->getOperand(0), Known, Q, Depth - 1); break; } } @@ -2154,7 +2148,7 @@ static void computeKnownBitsFromOperator(const Operator *I, /// Determine which bits of V are known to be either zero or one and return /// them. KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { KnownBits Known(getBitWidth(V->getType(), Q.DL)); ::computeKnownBits(V, DemandedElts, Known, Q, Depth); return Known; @@ -2163,7 +2157,7 @@ KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, /// Determine which bits of V are known to be either zero or one and return /// them. KnownBits llvm::computeKnownBits(const Value *V, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { KnownBits Known(getBitWidth(V->getType(), Q.DL)); computeKnownBits(V, Known, Q, Depth); return Known; @@ -2185,8 +2179,7 @@ KnownBits llvm::computeKnownBits(const Value *V, const SimplifyQuery &Q, /// same width as the vector element, and the bit is set only if it is true /// for all of the demanded elements in the vector specified by DemandedElts. void computeKnownBits(const Value *V, const APInt &DemandedElts, - KnownBits &Known, const SimplifyQuery &Q, - unsigned Depth) { + KnownBits &Known, const SimplifyQuery &Q, int Depth) { if (!DemandedElts) { // No demanded elts, better to assume we don't know anything. Known.resetAll(); @@ -2194,7 +2187,7 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts, } assert(V && "No Value?"); - assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); + assert(Depth >= 0 && "Invalid Search Depth"); #ifndef NDEBUG Type *Ty = V->getType(); @@ -2293,14 +2286,14 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts, Known = Range->toKnownBits(); // All recursive calls that increase depth must come after this. - if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) return; // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has // the bits of its aliasee. 
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { if (!GA->isInterposable()) - computeKnownBits(GA->getAliasee(), Known, Q, Depth + 1); + computeKnownBits(GA->getAliasee(), Known, Q, Depth - 1); return; } @@ -2327,7 +2320,7 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts, /// Try to detect a recurrence that the value of the induction variable is /// always a power of two (or zero). static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, - SimplifyQuery &Q, unsigned Depth) { + SimplifyQuery &Q, int Depth) { BinaryOperator *BO = nullptr; Value *Start = nullptr, *Step = nullptr; if (!matchSimpleRecurrence(PN, BO, Start, Step)) return false; @@ -2405,8 +2398,8 @@ static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero, /// be a power of two when defined. Supports values with integer or pointer /// types and vectors of integers. bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, - const SimplifyQuery &Q, unsigned Depth) { - assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); + const SimplifyQuery &Q, int Depth) { + assert(Depth >= 0 && "Invalid Search Depth"); if (isa<Constant>(V)) return OrZero ? match(V, m_Power2OrZero()) : match(V, m_Power2()); @@ -2468,7 +2461,7 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, return true; // The remaining tests are all recursive, so bail out if we hit the limit. - if (Depth++ == MaxAnalysisRecursionDepth) + if (Depth-- == 0) return false; switch (I->getOpcode()) { @@ -2556,7 +2549,7 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, // Recursively check all incoming values. Limit recursion to 2 levels, so // that search complexity is limited to number of operands^2. - unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); + int NewDepth = std::min(Depth, 1); return llvm::all_of(PN->operands(), [&](const Use &U) { // Value is power of 2 if it is coming from PHI node itself by induction. if (U.get() == PN) return true; @@ -2607,7 +2600,7 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, /// /// Currently this routine does not support vector GEPs. static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { const Function *F = nullptr; if (const Instruction *I = dyn_cast<Instruction>(GEP)) F = I->getFunction(); @@ -2660,7 +2653,7 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q, // to recurse 10k times just because we have 10k GEP operands. We don't // bail completely out because we want to handle constant GEPs regardless // of depth.
-    if (Depth++ >= MaxAnalysisRecursionDepth)
+    if (Depth-- <= 0)
       continue;

     if (isKnownNonZero(GTI.getOperand(), Q, Depth))
@@ -2823,7 +2816,7 @@ static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {

 static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q,
                          unsigned BitWidth, Value *X, Value *Y, bool NSW,
-                         bool NUW, unsigned Depth) {
+                         bool NUW, int Depth) {
   // (X + (X != 0)) is non zero
   if (matchOpWithOpEqZero(X, Y))
     return true;
@@ -2868,8 +2861,7 @@ static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q,
 }

 static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q,
-                         unsigned BitWidth, Value *X, Value *Y,
-                         unsigned Depth) {
+                         unsigned BitWidth, Value *X, Value *Y, int Depth) {
   // (X - (X != 0)) is non zero
   // ((X != 0) - X) is non zero
   if (matchOpWithOpEqZero(X, Y))
@@ -2885,7 +2877,7 @@ static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q,

 static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q,
                          unsigned BitWidth, Value *X, Value *Y, bool NSW,
-                         bool NUW, unsigned Depth) {
+                         bool NUW, int Depth) {
   // If X and Y are non-zero then so is X * Y as long as the multiplication
   // does not overflow.
   if (NSW || NUW)
@@ -2913,7 +2905,7 @@ static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q,

 static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
                            const SimplifyQuery &Q, const KnownBits &KnownVal,
-                           unsigned Depth) {
+                           int Depth) {
   auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
     switch (I->getOpcode()) {
     case Instruction::Shl:
@@ -2964,7 +2956,7 @@ static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,

 static bool isKnownNonZeroFromOperator(const Operator *I,
                                        const APInt &DemandedElts,
-                                       const SimplifyQuery &Q, unsigned Depth) {
+                                       const SimplifyQuery &Q, int Depth) {
   unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL);
   switch (I->getOpcode()) {
   case Instruction::Alloca:
@@ -3164,7 +3156,7 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
     // Check if all incoming values are non-zero using recursion.
     SimplifyQuery RecQ = Q.getWithoutCondContext();
-    unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
+    int NewDepth = std::min(Depth, 1);
     return llvm::all_of(PN->operands(), [&](const Use &U) {
       if (U.get() == PN)
         return true;
@@ -3426,11 +3418,11 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
 /// pointer couldn't possibly be null at the specified instruction.
 /// Supports values with integer or pointer type and vectors of integers.
 bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
-                    const SimplifyQuery &Q, unsigned Depth) {
+                    const SimplifyQuery &Q, int Depth) {
   Type *Ty = V->getType();

 #ifndef NDEBUG
-  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
+  assert(Depth >= 0 && "Invalid Search Depth");

   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
     assert(
@@ -3493,7 +3485,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
     return true;

   // Some of the tests below are recursive, so bail out if we hit the limit.
-  if (Depth++ >= MaxAnalysisRecursionDepth)
+  if (Depth-- <= 0)
     return false;

   // Check for pointer simplifications.
@@ -3520,8 +3512,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
   return false;
 }

-bool llvm::isKnownNonZero(const Value *V, const SimplifyQuery &Q,
-                          unsigned Depth) {
+bool llvm::isKnownNonZero(const Value *V, const SimplifyQuery &Q, int Depth) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts = FVTy ?
APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); @@ -3653,7 +3644,7 @@ getInvertibleOperands(const Operator *Op1, /// implies V2 != V1. static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { const BinaryOperator *BO = dyn_cast(V1); if (!BO) return false; @@ -3673,7 +3664,7 @@ static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, Op = BO->getOperand(0); else return false; - return isKnownNonZero(Op, DemandedElts, Q, Depth + 1); + return isKnownNonZero(Op, DemandedElts, Q, Depth - 1); } return false; } @@ -3682,13 +3673,13 @@ static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, /// the multiplication is nuw or nsw. static bool isNonEqualMul(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { if (auto *OBO = dyn_cast(V2)) { const APInt *C; return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) && (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && !C->isZero() && !C->isOne() && - isKnownNonZero(V1, DemandedElts, Q, Depth + 1); + isKnownNonZero(V1, DemandedElts, Q, Depth - 1); } return false; } @@ -3697,19 +3688,19 @@ static bool isNonEqualMul(const Value *V1, const Value *V2, /// the shift is nuw or nsw. static bool isNonEqualShl(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { if (auto *OBO = dyn_cast(V2)) { const APInt *C; return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) && (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && - !C->isZero() && isKnownNonZero(V1, DemandedElts, Q, Depth + 1); + !C->isZero() && isKnownNonZero(V1, DemandedElts, Q, Depth - 1); } return false; } static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { // Check two PHIs are in same block. 
if (PN1->getParent() != PN2->getParent()) return false; @@ -3731,7 +3722,7 @@ static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, SimplifyQuery RecQ = Q.getWithoutCondContext(); RecQ.CxtI = IncomBB->getTerminator(); - if (!isKnownNonEqual(IV1, IV2, DemandedElts, RecQ, Depth + 1)) + if (!isKnownNonEqual(IV1, IV2, DemandedElts, RecQ, Depth - 1)) return false; UsedFullRecursion = true; } @@ -3740,7 +3731,7 @@ static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, static bool isNonEqualSelect(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { const SelectInst *SI1 = dyn_cast(V1); if (!SI1) return false; @@ -3750,12 +3741,12 @@ static bool isNonEqualSelect(const Value *V1, const Value *V2, const Value *Cond2 = SI2->getCondition(); if (Cond1 == Cond2) return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(), - DemandedElts, Q, Depth + 1) && + DemandedElts, Q, Depth - 1) && isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(), - DemandedElts, Q, Depth + 1); + DemandedElts, Q, Depth - 1); } - return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Q, Depth + 1) && - isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Q, Depth + 1); + return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Q, Depth - 1) && + isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Q, Depth - 1); } // Check to see if A is both a GEP and is the incoming value for a PHI in the @@ -3811,7 +3802,7 @@ static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, } static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { if (!Q.CxtI) return false; @@ -3870,14 +3861,14 @@ static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2, /// Return true if it is known that V1 != V2. static bool isKnownNonEqual(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, - unsigned Depth) { + int Depth) { if (V1 == V2) return false; if (V1->getType() != V2->getType()) // We can't look through casts yet. return false; - if (Depth >= MaxAnalysisRecursionDepth) + if (Depth <= 0) return false; // See if we can recurse through (exactly one of) our operands. This @@ -3888,7 +3879,7 @@ static bool isKnownNonEqual(const Value *V1, const Value *V2, if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { if (auto Values = getInvertibleOperands(O1, O2)) return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q, - Depth + 1); + Depth - 1); if (const PHINode *PN1 = dyn_cast(V1)) { const PHINode *PN2 = cast(V2); @@ -3936,7 +3927,7 @@ static bool isKnownNonEqual(const Value *V1, const Value *V2, // Check PtrToInt type matches the pointer size. 
if (match(V1, m_PtrToIntSameSize(Q.DL, m_Value(A))) && match(V2, m_PtrToIntSameSize(Q.DL, m_Value(B)))) - return isKnownNonEqual(A, B, DemandedElts, Q, Depth + 1); + return isKnownNonEqual(A, B, DemandedElts, Q, Depth - 1); if (isKnownNonEqualFromContext(V1, V2, Q, Depth)) return true; @@ -3973,10 +3964,10 @@ static unsigned computeNumSignBitsVectorConstant(const Value *V, static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth); + const SimplifyQuery &Q, int Depth); static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Q, Depth); assert(Result > 0 && "At least one sign bit needs to be present!"); return Result; @@ -3991,10 +3982,10 @@ static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, /// elements in the vector specified by DemandedElts. static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { Type *Ty = V->getType(); #ifndef NDEBUG - assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); + assert(Depth >= 0 && "Invalid Search Depth"); if (auto *FVTy = dyn_cast(Ty)) { assert( @@ -4021,7 +4012,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // Note that ConstantInt is handled by the general computeKnownBits case // below. - if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) return 1; if (auto *U = dyn_cast(V)) { @@ -4046,7 +4037,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, if (isa(Ty)) { // Fast case - sign splat can be simply split across the small elements. // This works for both vector and scalar sources - Tmp = ComputeNumSignBits(Src, Q, Depth + 1); + Tmp = ComputeNumSignBits(Src, Q, Depth - 1); if (Tmp == SrcBits) return TyBits; } @@ -4054,7 +4045,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, } case Instruction::SExt: Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); - return ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1) + + return ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1) + Tmp; case Instruction::SDiv: { @@ -4068,7 +4059,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // Calculate the incoming numerator bits. unsigned NumBits = - ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); // Add floor(log(C)) bits to the numerator bits. return std::min(TyBits, NumBits + Denominator->logBase2()); @@ -4077,7 +4068,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, } case Instruction::SRem: { - Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); const APInt *Denominator; // srem X, C -> we know that the result is within [-C+1,C) when C is a @@ -4108,7 +4099,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, } case Instruction::AShr: { - Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); // ashr X, C -> adds C sign bits. Vectors too. const APInt *ShAmt; if (match(U->getOperand(1), m_APInt(ShAmt))) { @@ -4131,11 +4122,11 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // all extended bits are shifted out. 
if (match(U->getOperand(0), m_ZExt(m_Value(X))) && ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) { - Tmp = ComputeNumSignBits(X, DemandedElts, Q, Depth + 1); + Tmp = ComputeNumSignBits(X, DemandedElts, Q, Depth - 1); Tmp += TyBits - X->getType()->getScalarSizeInBits(); } else Tmp = - ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); if (ShAmt->uge(Tmp)) break; // Shifted all sign bits out. Tmp2 = ShAmt->getZExtValue(); @@ -4147,9 +4138,9 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, case Instruction::Or: case Instruction::Xor: // NOT is handled here. // Logical binary ops preserve the number of sign bits at the worst. - Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); if (Tmp != 1) { - Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1); + Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth - 1); FirstAnswer = std::min(Tmp, Tmp2); // We computed what we know about the sign bits as our first // answer. Now proceed to the generic code that uses @@ -4165,24 +4156,24 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, if (isSignedMinMaxClamp(U, X, CLow, CHigh)) return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); - Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth - 1); if (Tmp == 1) break; - Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Q, Depth + 1); + Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Q, Depth - 1); return std::min(Tmp, Tmp2); } case Instruction::Add: // Add can have at most one carry bit. Thus we know that the output // is, at worst, one more bit than the inputs. - Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth - 1); if (Tmp == 1) break; // Special case decrementing a value (ADD X, -1): if (const auto *CRHS = dyn_cast(U->getOperand(1))) if (CRHS->isAllOnesValue()) { KnownBits Known(TyBits); - computeKnownBits(U->getOperand(0), DemandedElts, Known, Q, Depth + 1); + computeKnownBits(U->getOperand(0), DemandedElts, Known, Q, Depth - 1); // If the input is known to be 0 or 1, the output is 0/-1, which is // all sign bits set. @@ -4195,13 +4186,13 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, return Tmp; } - Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1); + Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth - 1); if (Tmp2 == 1) break; return std::min(Tmp, Tmp2) - 1; case Instruction::Sub: - Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1); + Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth - 1); if (Tmp2 == 1) break; @@ -4209,7 +4200,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, if (const auto *CLHS = dyn_cast(U->getOperand(0))) if (CLHS->isNullValue()) { KnownBits Known(TyBits); - computeKnownBits(U->getOperand(1), DemandedElts, Known, Q, Depth + 1); + computeKnownBits(U->getOperand(1), DemandedElts, Known, Q, Depth - 1); // If the input is known to be 0 or 1, the output is 0/-1, which is // all sign bits set. if ((Known.Zero | 1).isAllOnes()) @@ -4226,7 +4217,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // Sub can have at most one carry bit. Thus we know that the output // is, at worst, one more bit than the inputs. 
- Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); if (Tmp == 1) break; return std::min(Tmp, Tmp2) - 1; @@ -4235,11 +4226,11 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // The output of the Mul can be at most twice the valid bits in the // inputs. unsigned SignBitsOp0 = - ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); if (SignBitsOp0 == 1) break; unsigned SignBitsOp1 = - ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1); + ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth - 1); if (SignBitsOp1 == 1) break; unsigned OutValidBits = @@ -4263,7 +4254,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, if (Tmp == 1) return Tmp; RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); Tmp = std::min(Tmp, ComputeNumSignBits(PN->getIncomingValue(i), - DemandedElts, RecQ, Depth + 1)); + DemandedElts, RecQ, Depth - 1)); } return Tmp; } @@ -4272,7 +4263,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // If the input contained enough sign bits that some remain after the // truncation, then we can make use of that. Otherwise we don't know // anything. - Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth + 1); + Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth - 1); unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits(); if (Tmp > (OperandTyBits - TyBits)) return Tmp - (OperandTyBits - TyBits); @@ -4285,7 +4276,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, // skip tracking the specific element. But at least we might find // information valid for all elements of the vector (for example if vector // is sign extended, shifted, etc). - return ComputeNumSignBits(U->getOperand(0), Q, Depth + 1); + return ComputeNumSignBits(U->getOperand(0), Q, Depth - 1); case Instruction::ShuffleVector: { // Collect the minimum number of sign bits that are shared by every vector @@ -4303,7 +4294,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, Tmp = std::numeric_limits::max(); if (!!DemandedLHS) { const Value *LHS = Shuf->getOperand(0); - Tmp = ComputeNumSignBits(LHS, DemandedLHS, Q, Depth + 1); + Tmp = ComputeNumSignBits(LHS, DemandedLHS, Q, Depth - 1); } // If we don't know anything, early out and try computeKnownBits // fall-back. @@ -4311,7 +4302,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, break; if (!!DemandedRHS) { const Value *RHS = Shuf->getOperand(1); - Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Q, Depth + 1); + Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Q, Depth - 1); Tmp = std::min(Tmp, Tmp2); } // If we don't know anything, early out and try computeKnownBits @@ -4328,7 +4319,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, break; case Intrinsic::abs: Tmp = - ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1); + ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth - 1); if (Tmp == 1) break; @@ -4553,20 +4544,20 @@ static void computeKnownFPClassFromCond(const Value *V, Value *Cond, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext, - unsigned Depth = 0) { + int Depth = MaxAnalysisRecursionDepth) { Value *A, *B; - if (Depth < MaxAnalysisRecursionDepth && + if (Depth && (CondIsTrue ? 
match(Cond, m_LogicalAnd(m_Value(A), m_Value(B))) : match(Cond, m_LogicalOr(m_Value(A), m_Value(B))))) { computeKnownFPClassFromCond(V, A, CondIsTrue, CxtI, KnownFromContext, - Depth + 1); + Depth - 1); computeKnownFPClassFromCond(V, B, CondIsTrue, CxtI, KnownFromContext, - Depth + 1); + Depth - 1); return; } - if (Depth < MaxAnalysisRecursionDepth && match(Cond, m_Not(m_Value(A)))) { + if (Depth && match(Cond, m_Not(m_Value(A)))) { computeKnownFPClassFromCond(V, A, !CondIsTrue, CxtI, KnownFromContext, - Depth + 1); + Depth - 1); return; } CmpPredicate Pred; @@ -4650,11 +4641,11 @@ static KnownFPClass computeKnownFPClassFromContext(const Value *V, void computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, - const SimplifyQuery &Q, unsigned Depth); + const SimplifyQuery &Q, int Depth); static void computeKnownFPClass(const Value *V, KnownFPClass &Known, FPClassTest InterestedClasses, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { auto *FVTy = dyn_cast(V->getType()); APInt DemandedElts = FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); @@ -4665,15 +4656,14 @@ static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, - const SimplifyQuery &Q, - unsigned Depth) { + const SimplifyQuery &Q, int Depth) { if ((InterestedClasses & (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone) return; KnownFPClass KnownSrc; computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); // Sign should be preserved // TODO: Handle cannot be ordered greater than zero @@ -4687,7 +4677,7 @@ static void computeKnownFPClassForFPTrunc(const Operator *Op, void computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { assert(Known.isUnknown() && "should not be called with known information"); if (!DemandedElts) { @@ -4696,7 +4686,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, return; } - assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); + assert(Depth >= 0 && "Invalid Search Depth"); if (auto *CFP = dyn_cast(V)) { Known.KnownFPClasses = CFP->getValueAPF().classify(); @@ -4790,14 +4780,14 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, return; // All recursive calls that increase depth must come after this. - if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) return; const unsigned Opc = Op->getOpcode(); switch (Opc) { case Instruction::FNeg: { computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, - Known, Q, Depth + 1); + Known, Q, Depth - 1); Known.fneg(); break; } @@ -4843,11 +4833,11 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass Known2; computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known, - Q, Depth + 1); + Q, Depth - 1); Known.KnownFPClasses &= FilterLHS; computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS, - Known2, Q, Depth + 1); + Known2, Q, Depth - 1); Known2.KnownFPClasses &= FilterRHS; Known |= Known2; @@ -4862,7 +4852,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // If we only care about the sign bit we don't need to inspect the // operand. 
computeKnownFPClass(II->getArgOperand(0), DemandedElts, - InterestedClasses, Known, Q, Depth + 1); + InterestedClasses, Known, Q, Depth - 1); } Known.fabs(); @@ -4872,9 +4862,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass KnownSign; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - Known, Q, Depth + 1); + Known, Q, Depth - 1); computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses, - KnownSign, Q, Depth + 1); + KnownSign, Q, Depth - 1); Known.copysign(KnownSign); break; } @@ -4892,7 +4882,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // x * x + y is non-negative if y is non-negative. KnownFPClass KnownAddend; computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses, - KnownAddend, Q, Depth + 1); + KnownAddend, Q, Depth - 1); if (KnownAddend.cannotBeOrderedLessThanZero()) Known.knownNot(fcNegative); @@ -4906,7 +4896,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); if (KnownSrc.isKnownNeverPosInfinity()) Known.knownNot(fcPosInf); @@ -4938,7 +4928,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // Return NaN on infinite inputs. KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); Known.knownNot(fcInf); if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity()) Known.knownNot(fcNan); @@ -4952,9 +4942,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, case Intrinsic::maximumnum: { KnownFPClass KnownLHS, KnownRHS; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - KnownLHS, Q, Depth + 1); + KnownLHS, Q, Depth - 1); computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses, - KnownRHS, Q, Depth + 1); + KnownRHS, Q, Depth - 1); bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN(); Known = KnownLHS | KnownRHS; @@ -5045,7 +5035,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, case Intrinsic::canonicalize: { KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); // This is essentially a stronger form of // propagateCanonicalizingSrc. Other "canonicalizing" operations don't @@ -5096,7 +5086,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // reduce min/max will choose an element from one of the vector elements, // so we can infer and class information that is common to all elements. Known = computeKnownFPClass(II->getArgOperand(0), II->getFastMathFlags(), - InterestedClasses, Q, Depth + 1); + InterestedClasses, Q, Depth - 1); // Can only propagate sign if output is never NaN. 
if (!Known.isKnownNeverNaN()) Known.SignBit.reset(); @@ -5106,7 +5096,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, case Intrinsic::vector_reverse: Known = computeKnownFPClass( II->getArgOperand(0), DemandedElts.reverseBits(), - II->getFastMathFlags(), InterestedClasses, Q, Depth + 1); + II->getFastMathFlags(), InterestedClasses, Q, Depth - 1); break; case Intrinsic::trunc: case Intrinsic::floor: @@ -5122,7 +5112,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, if (InterestedSrcs & fcNegFinite) InterestedSrcs |= fcNegFinite; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); // Integer results cannot be subnormal. Known.knownNot(fcSubnormal); @@ -5155,7 +5145,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); if (KnownSrc.isKnownNeverNaN()) { Known.knownNot(fcNan); Known.signBitMustBeZero(); @@ -5189,7 +5179,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); if (KnownSrc.isKnownNeverPosInfinity()) Known.knownNot(fcPosInf); @@ -5220,7 +5210,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth(); KnownBits ExponentKnownBits(BitWidth); computeKnownBits(Exp, isa(ExpTy) ? DemandedElts : APInt(1, 1), - ExponentKnownBits, Q, Depth + 1); + ExponentKnownBits, Q, Depth - 1); if (ExponentKnownBits.Zero[0]) { // Is even Known.knownNot(fcNegative); @@ -5237,7 +5227,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // pow(-inf, exp) --> -inf if exp is positive odd. KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); if (KnownSrc.isKnownNever(fcNegative)) Known.knownNot(fcNegative); break; @@ -5245,7 +5235,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, case Intrinsic::ldexp: { KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - KnownSrc, Q, Depth + 1); + KnownSrc, Q, Depth - 1); Known.propagateNaN(KnownSrc, /*PropagateSign=*/true); // Sign is preserved, but underflows may produce zeroes. 
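// Several hunks above replace std::max(Depth, MaxAnalysisRecursionDepth - 1)
// with std::min(Depth, 1) when recursing into PHI operands. A small sketch,
// assuming the old upward Depth k corresponds to the new downward Depth
// MaxAnalysisRecursionDepth - k, checking that both clamps leave the same
// one-level budget:
#include <algorithm>
#include <cassert>

constexpr int MaxAnalysisRecursionDepth = 6;

int main() {
  for (int Up = 0; Up <= MaxAnalysisRecursionDepth; ++Up) {
    int Down = MaxAnalysisRecursionDepth - Up; // same state, new convention
    int OldRemaining =
        MaxAnalysisRecursionDepth -
        std::max(Up, MaxAnalysisRecursionDepth - 1); // old-style clamp
    int NewRemaining = std::min(Down, 1);            // new-style clamp
    assert(OldRemaining == NewRemaining && "clamps must agree");
  }
  return 0;
}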
@@ -5271,7 +5261,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, unsigned Precision = APFloat::semanticsPrecision(Flt); const Value *ExpArg = II->getArgOperand(1); ConstantRange ExpRange = computeConstantRange( - ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth + 1); + ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth - 1); const int MantissaBits = Precision - 1; if (ExpRange.getSignedMin().sge(static_cast(MantissaBits))) @@ -5308,7 +5298,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, } case Intrinsic::arithmetic_fence: { computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, - Known, Q, Depth + 1); + Known, Q, Depth - 1); break; } case Intrinsic::experimental_constrained_sitofp: @@ -5351,7 +5341,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, if (InterestedClasses & fcNan) InterestedSrcs |= fcInf; computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs, - KnownRHS, Q, Depth + 1); + KnownRHS, Q, Depth - 1); if ((WantNaN && KnownRHS.isKnownNeverNaN()) || (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) || @@ -5360,7 +5350,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // RHS is canonically cheaper to compute. Skip inspecting the LHS if // there's no point. computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs, - KnownLHS, Q, Depth + 1); + KnownLHS, Q, Depth - 1); // Adding positive and negative infinity produces NaN. // TODO: Check sign of infinities. if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() && @@ -5419,12 +5409,12 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass KnownLHS, KnownRHS; computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS, - Q, Depth + 1); + Q, Depth - 1); if (!KnownRHS.isKnownNeverNaN()) break; computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS, - Q, Depth + 1); + Q, Depth - 1); if (!KnownLHS.isKnownNeverNaN()) break; @@ -5483,7 +5473,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, computeKnownFPClass(Op->getOperand(1), DemandedElts, fcNan | fcInf | fcZero | fcNegative, KnownRHS, Q, - Depth + 1); + Depth - 1); bool KnowSomethingUseful = KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative); @@ -5495,7 +5485,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses & InterestedLHS, KnownLHS, Q, - Depth + 1); + Depth - 1); } const Function *F = cast(Op)->getFunction(); @@ -5544,7 +5534,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, case Instruction::FPExt: { // Infinity, nan and zero propagate from source. computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, - Known, Q, Depth + 1); + Known, Q, Depth - 1); const fltSemantics &DstTy = Op->getType()->getScalarType()->getFltSemantics(); @@ -5613,7 +5603,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, if (CIdx && CIdx->getValue().ult(NumElts)) DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known, - Q, Depth + 1); + Q, Depth - 1); } break; @@ -5636,7 +5626,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // Do we demand the inserted element? 
if (NeedsElt) { - computeKnownFPClass(Elt, Known, InterestedClasses, Q, Depth + 1); + computeKnownFPClass(Elt, Known, InterestedClasses, Q, Depth - 1); // If we don't know any bits, early out. if (Known.isUnknown()) break; @@ -5648,7 +5638,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, if (!DemandedVecElts.isZero()) { KnownFPClass Known2; computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2, Q, - Depth + 1); + Depth - 1); Known |= Known2; } @@ -5665,7 +5655,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, if (!!DemandedLHS) { const Value *LHS = Shuf->getOperand(0); computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known, Q, - Depth + 1); + Depth - 1); // If we don't know any bits, early out. if (Known.isUnknown()) @@ -5678,7 +5668,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass Known2; const Value *RHS = Shuf->getOperand(1); computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2, Q, - Depth + 1); + Depth - 1); Known |= Known2; } @@ -5697,7 +5687,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass KnownSrc; computeKnownFPClass(II->getArgOperand(0), DemandedElts, - InterestedClasses, KnownSrc, Q, Depth + 1); + InterestedClasses, KnownSrc, Q, Depth - 1); const Function *F = cast(Op)->getFunction(); const fltSemantics &FltSem = @@ -5733,7 +5723,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, } computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Q, - Depth + 1); + Depth - 1); break; } case Instruction::PHI: { @@ -5744,9 +5734,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, // Otherwise take the unions of the known bit sets of the operands, // taking conservative care to avoid excessive recursion. - const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2; + const int PhiRecursionLimit = 2; - if (Depth < PhiRecursionLimit) { + if (Depth > PhiRecursionLimit) { // Skip if every incoming value references to ourself. if (isa_and_nonnull(P->hasConstantValue())) break; @@ -5791,7 +5781,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, const Type *Ty = Op->getType()->getScalarType(); KnownBits Bits(Ty->getScalarSizeInBits()); - computeKnownBits(Src, DemandedElts, Bits, Q, Depth + 1); + computeKnownBits(Src, DemandedElts, Bits, Q, Depth - 1); // Transfer information from the sign bit. 
if (Bits.isNonNegative()) @@ -5847,8 +5837,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass llvm::computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, - const SimplifyQuery &SQ, - unsigned Depth) { + const SimplifyQuery &SQ, int Depth) { KnownFPClass KnownClasses; ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, SQ, Depth); @@ -5857,8 +5846,7 @@ KnownFPClass llvm::computeKnownFPClass(const Value *V, KnownFPClass llvm::computeKnownFPClass(const Value *V, FPClassTest InterestedClasses, - const SimplifyQuery &SQ, - unsigned Depth) { + const SimplifyQuery &SQ, int Depth) { KnownFPClass Known; ::computeKnownFPClass(V, Known, InterestedClasses, SQ, Depth); return Known; @@ -5867,16 +5855,17 @@ KnownFPClass llvm::computeKnownFPClass(const Value *V, KnownFPClass llvm::computeKnownFPClass( const Value *V, const DataLayout &DL, FPClassTest InterestedClasses, const TargetLibraryInfo *TLI, AssumptionCache *AC, const Instruction *CxtI, - const DominatorTree *DT, bool UseInstrInfo, unsigned Depth) { + const DominatorTree *DT, bool UseInstrInfo, int Depth) { return computeKnownFPClass(V, InterestedClasses, SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo), Depth); } -KnownFPClass -llvm::computeKnownFPClass(const Value *V, const APInt &DemandedElts, - FastMathFlags FMF, FPClassTest InterestedClasses, - const SimplifyQuery &SQ, unsigned Depth) { +KnownFPClass llvm::computeKnownFPClass(const Value *V, + const APInt &DemandedElts, + FastMathFlags FMF, + FPClassTest InterestedClasses, + const SimplifyQuery &SQ, int Depth) { if (FMF.noNaNs()) InterestedClasses &= ~fcNan; if (FMF.noInfs()) @@ -5894,8 +5883,7 @@ llvm::computeKnownFPClass(const Value *V, const APInt &DemandedElts, KnownFPClass llvm::computeKnownFPClass(const Value *V, FastMathFlags FMF, FPClassTest InterestedClasses, - const SimplifyQuery &SQ, - unsigned Depth) { + const SimplifyQuery &SQ, int Depth) { auto *FVTy = dyn_cast(V->getType()); APInt DemandedElts = FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); @@ -5904,27 +5892,27 @@ KnownFPClass llvm::computeKnownFPClass(const Value *V, FastMathFlags FMF, } bool llvm::cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { KnownFPClass Known = computeKnownFPClass(V, fcNegZero, SQ, Depth); return Known.isKnownNeverNegZero(); } bool llvm::cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { KnownFPClass Known = computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, SQ, Depth); return Known.cannotBeOrderedLessThanZero(); } bool llvm::isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { KnownFPClass Known = computeKnownFPClass(V, fcInf, SQ, Depth); return Known.isKnownNeverInfinity(); } /// Return true if the floating-point value can never contain a NaN or infinity. bool llvm::isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, SQ, Depth); return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity(); } @@ -5932,8 +5920,7 @@ bool llvm::isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, /// Return true if the floating-point scalar value is not a NaN or if the /// floating-point vector value has no NaN elements. Return false if a value /// could ever be NaN. 
-bool llvm::isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { +bool llvm::isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, int Depth) { KnownFPClass Known = computeKnownFPClass(V, fcNan, SQ, Depth); return Known.isKnownNeverNaN(); } @@ -5943,7 +5930,7 @@ bool llvm::isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, /// Otherwise return std::nullopt. std::optional llvm::computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, - unsigned Depth) { + int Depth) { KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, SQ, Depth); return Known.SignBit; } @@ -7502,18 +7489,18 @@ bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) { } static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, - unsigned Depth) { + int Depth) { if (ValAssumedPoison == V) return true; - const unsigned MaxDepth = 2; - if (Depth >= MaxDepth) + const int MaxDepth = (MaxAnalysisRecursionDepth - 2); + if (Depth <= MaxDepth) return false; if (const auto *I = dyn_cast(V)) { if (any_of(I->operands(), [=](const Use &Op) { return propagatesPoison(Op) && - directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1); + directlyImpliesPoison(ValAssumedPoison, Op, Depth - 1); })) return true; @@ -7530,36 +7517,40 @@ static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, } static bool impliesPoison(const Value *ValAssumedPoison, const Value *V, - unsigned Depth) { + int Depth) { if (isGuaranteedNotToBePoison(ValAssumedPoison)) return true; - if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0)) + if (directlyImpliesPoison(ValAssumedPoison, V, + /* Depth */ MaxAnalysisRecursionDepth)) return true; - const unsigned MaxDepth = 2; - if (Depth >= MaxDepth) + const int MaxDepth = (MaxAnalysisRecursionDepth - 2); + if (Depth <= MaxDepth) return false; const auto *I = dyn_cast(ValAssumedPoison); if (I && !canCreatePoison(cast(I))) { return all_of(I->operands(), [=](const Value *Op) { - return impliesPoison(Op, V, Depth + 1); + return impliesPoison(Op, V, Depth - 1); }); } return false; } bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) { - return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0); + return ::impliesPoison(ValAssumedPoison, V, + /* Depth */ MaxAnalysisRecursionDepth); } static bool programUndefinedIfUndefOrPoison(const Value *V, bool PoisonOnly); -static bool isGuaranteedNotToBeUndefOrPoison( - const Value *V, AssumptionCache *AC, const Instruction *CtxI, - const DominatorTree *DT, unsigned Depth, UndefPoisonKind Kind) { - if (Depth >= MaxAnalysisRecursionDepth) +static bool isGuaranteedNotToBeUndefOrPoison(const Value *V, + AssumptionCache *AC, + const Instruction *CtxI, + const DominatorTree *DT, int Depth, + UndefPoisonKind Kind) { + if (Depth <= 0) return false; if (isa(V)) @@ -7606,7 +7597,7 @@ static bool isGuaranteedNotToBeUndefOrPoison( return true; auto OpCheck = [&](const Value *V) { - return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, Kind); + return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth - 1, Kind); }; if (auto *Opr = dyn_cast(V)) { @@ -7630,7 +7621,7 @@ static bool isGuaranteedNotToBeUndefOrPoison( continue; auto *TI = PN->getIncomingBlock(i)->getTerminator(); if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, - DT, Depth + 1, Kind)) { + DT, Depth - 1, Kind)) { IsWellDefined = false; break; } @@ -7706,21 +7697,21 @@ static bool isGuaranteedNotToBeUndefOrPoison( bool llvm::isGuaranteedNotToBeUndefOrPoison(const 
Value *V, AssumptionCache *AC, const Instruction *CtxI, const DominatorTree *DT, - unsigned Depth) { + int Depth) { return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, UndefPoisonKind::UndefOrPoison); } bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC, const Instruction *CtxI, - const DominatorTree *DT, unsigned Depth) { + const DominatorTree *DT, int Depth) { return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, UndefPoisonKind::PoisonOnly); } bool llvm::isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC, const Instruction *CtxI, - const DominatorTree *DT, unsigned Depth) { + const DominatorTree *DT, int Depth) { return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, UndefPoisonKind::UndefOnly); } @@ -8242,17 +8233,17 @@ static SelectPatternResult matchClamp(CmpInst::Predicate Pred, static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, - unsigned Depth) { + int Depth) { // TODO: Allow FP min/max with nnan/nsz. assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); Value *A = nullptr, *B = nullptr; - SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); + SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth - 1); if (!SelectPatternResult::isMinOrMax(L.Flavor)) return {SPF_UNKNOWN, SPNB_NA, false}; Value *C = nullptr, *D = nullptr; - SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); + SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth - 1); if (L.Flavor != R.Flavor) return {SPF_UNKNOWN, SPNB_NA, false}; @@ -8348,11 +8339,10 @@ static Value *getNotValue(Value *V) { } /// Match non-obvious integer minimum and maximum sequences. -static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, - Value *CmpLHS, Value *CmpRHS, - Value *TrueVal, Value *FalseVal, - Value *&LHS, Value *&RHS, - unsigned Depth) { +static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, + Value *CmpRHS, Value *TrueVal, + Value *FalseVal, Value *&LHS, + Value *&RHS, int Depth) { // Assume success. If there's no match, callers should not use these anyway. LHS = TrueVal; RHS = FalseVal; @@ -8584,11 +8574,10 @@ llvm::getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C) { } static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, - FastMathFlags FMF, - Value *CmpLHS, Value *CmpRHS, - Value *TrueVal, Value *FalseVal, - Value *&LHS, Value *&RHS, - unsigned Depth) { + FastMathFlags FMF, Value *CmpLHS, + Value *CmpRHS, Value *TrueVal, + Value *FalseVal, Value *&LHS, + Value *&RHS, int Depth) { bool HasMismatchedZeros = false; if (CmpInst::isFPPredicate(Pred)) { // IEEE-754 ignores the sign of 0.0 in comparisons. 
So if the select has one @@ -8894,8 +8883,8 @@ static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, } SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp, - unsigned Depth) { - if (Depth >= MaxAnalysisRecursionDepth) + int Depth) { + if (Depth <= 0) return {SPF_UNKNOWN, SPNB_NA, false}; SelectInst *SI = dyn_cast(V); @@ -8915,7 +8904,7 @@ SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, SelectPatternResult llvm::matchDecomposedSelectPattern( CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, - FastMathFlags FMF, Instruction::CastOps *CastOp, unsigned Depth) { + FastMathFlags FMF, Instruction::CastOps *CastOp, int Depth) { CmpInst::Predicate Pred = CmpI->getPredicate(); Value *CmpLHS = CmpI->getOperand(0); Value *CmpRHS = CmpI->getOperand(1); @@ -9314,10 +9303,10 @@ isImpliedCondICmps(CmpPredicate LPred, const Value *L0, const Value *L1, // C1` (see discussion: D58633). ConstantRange LCR = computeConstantRange( L1, ICmpInst::isSigned(LPred), /* UseInstrInfo=*/true, /*AC=*/nullptr, - /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1); + /*CxtI=*/nullptr, /*DT=*/nullptr, /*Depth=*/1); ConstantRange RCR = computeConstantRange( R1, ICmpInst::isSigned(RPred), /* UseInstrInfo=*/true, /*AC=*/nullptr, - /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1); + /*CxtI=*/nullptr, /*DT=*/nullptr, /*Depth=*/1); // Even if L1/R1 are not both constant, we can still sometimes deduce // relationship from a single constant. For example X u> Y implies X != 0. if (auto R = isImpliedCondCommonOperandWithCR(LPred, LCR, RPred, RCR)) @@ -9375,14 +9364,14 @@ isImpliedCondICmps(CmpPredicate LPred, const Value *L0, const Value *L1, static std::optional isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, - const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { + const DataLayout &DL, bool LHSIsTrue, int Depth) { // The LHS must be an 'or', 'and', or a 'select' instruction. assert((LHS->getOpcode() == Instruction::And || LHS->getOpcode() == Instruction::Or || LHS->getOpcode() == Instruction::Select) && "Expected LHS to be 'and', 'or', or 'select'."); - assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit"); + assert(Depth && "Hit recursion limit"); // If the result of an 'or' is false, then we know both legs of the 'or' are // false. Similarly, if the result of an 'and' is true, then we know both @@ -9392,10 +9381,10 @@ isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) { // FIXME: Make this non-recursion. if (std::optional Implication = isImpliedCondition( - ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) + ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth - 1)) return Implication; if (std::optional Implication = isImpliedCondition( - ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) + ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth - 1)) return Implication; return std::nullopt; } @@ -9405,9 +9394,9 @@ isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, std::optional llvm::isImpliedCondition(const Value *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, - const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { + const DataLayout &DL, bool LHSIsTrue, int Depth) { // Bail out when we hit the limit. 
- if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) return std::nullopt; // A mismatch occurs when we compare a scalar cmp to a vector cmp, for @@ -9448,7 +9437,7 @@ llvm::isImpliedCondition(const Value *LHS, CmpPredicate RHSPred, std::optional llvm::isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, - bool LHSIsTrue, unsigned Depth) { + bool LHSIsTrue, int Depth) { // LHS ==> RHS by definition if (LHS == RHS) return LHSIsTrue; @@ -9478,7 +9467,7 @@ std::optional llvm::isImpliedCondition(const Value *LHS, const Value *RHS, return std::nullopt; } - if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) return std::nullopt; // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2 @@ -9486,21 +9475,21 @@ std::optional llvm::isImpliedCondition(const Value *LHS, const Value *RHS, const Value *RHS1, *RHS2; if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) { if (std::optional Imp = - isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1)) + isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth - 1)) if (*Imp == true) return !InvertRHS; if (std::optional Imp = - isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1)) + isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth - 1)) if (*Imp == true) return !InvertRHS; } if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) { if (std::optional Imp = - isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1)) + isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth - 1)) if (*Imp == false) return InvertRHS; if (std::optional Imp = - isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1)) + isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth - 1)) if (*Imp == false) return InvertRHS; } @@ -9936,11 +9925,10 @@ static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) { ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo, AssumptionCache *AC, const Instruction *CtxI, - const DominatorTree *DT, - unsigned Depth) { + const DominatorTree *DT, int Depth) { assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction"); - if (Depth == MaxAnalysisRecursionDepth) + if (Depth <= 0) return ConstantRange::getFull(V->getType()->getScalarSizeInBits()); if (auto *C = dyn_cast(V)) @@ -9959,9 +9947,9 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned, CR = getRangeForIntrinsic(*II, UseInstrInfo); else if (auto *SI = dyn_cast(V)) { ConstantRange CRTrue = computeConstantRange( - SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1); + SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth - 1); ConstantRange CRFalse = computeConstantRange( - SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1); + SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth - 1); CR = CRTrue.unionWith(CRFalse); CR = CR.intersectWith(getRangeForSelectPattern(*SI, IIQ)); } else if (isa(V) || isa(V)) { @@ -10004,7 +9992,7 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned, // TODO: Set "ForSigned" parameter via Cmp->isSigned()? 
ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false, - UseInstrInfo, AC, I, DT, Depth + 1); + UseInstrInfo, AC, I, DT, Depth - 1); CR = CR.intersectWith( ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS)); } diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp index 64af7a57e8d12..158d1124aa723 100644 --- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp +++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp @@ -1916,9 +1916,8 @@ static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, - unsigned Depth, - UndefPoisonKind Kind) { - if (Depth >= MaxAnalysisRecursionDepth) + int Depth, UndefPoisonKind Kind) { + if (Depth <= 0) return false; MachineInstr *RegDef = MRI.getVRegDef(Reg); @@ -1936,7 +1935,7 @@ static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, unsigned NumSources = BV->getNumSources(); for (unsigned I = 0; I < NumSources; ++I) if (!::isGuaranteedNotToBeUndefOrPoison(BV->getSourceReg(I), MRI, - Depth + 1, Kind)) + Depth - 1, Kind)) return false; return true; } @@ -1945,7 +1944,7 @@ static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, unsigned NumIncoming = Phi->getNumIncomingValues(); for (unsigned I = 0; I < NumIncoming; ++I) if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI, - Depth + 1, Kind)) + Depth - 1, Kind)) return false; return true; } @@ -1953,7 +1952,7 @@ static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, auto MOCheck = [&](const MachineOperand &MO) { if (!MO.isReg()) return true; - return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1, + return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth - 1, Kind); }; return !::canCreateUndefOrPoison(Reg, MRI, @@ -1977,21 +1976,20 @@ bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI, bool llvm::isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, - unsigned Depth) { + int Depth) { return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth, UndefPoisonKind::UndefOrPoison); } bool llvm::isGuaranteedNotToBePoison(Register Reg, const MachineRegisterInfo &MRI, - unsigned Depth) { + int Depth) { return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth, UndefPoisonKind::PoisonOnly); } bool llvm::isGuaranteedNotToBeUndef(Register Reg, - const MachineRegisterInfo &MRI, - unsigned Depth) { + const MachineRegisterInfo &MRI, int Depth) { return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth, UndefPoisonKind::UndefOnly); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 2fb4bfecda8aa..70db96ef0b824 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -2322,10 +2322,10 @@ foldBitwiseLogicWithIntrinsics(BinaryOperator &I, // 0 inside X and for X & Y we try to replace Y with -1 inside X. // Return the simplified result of X if successful, and nullptr otherwise. // If SimplifyOnly is true, no new instructions will be created. 
-static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp, - bool SimplifyOnly, - InstCombinerImpl &IC, - unsigned Depth = 0) { +static Value * +simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp, + bool SimplifyOnly, InstCombinerImpl &IC, + int Depth = MaxAnalysisRecursionDepth) { if (Op == RepOp) return nullptr; @@ -2333,16 +2333,16 @@ static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp, return RepOp; auto *I = dyn_cast(V); - if (!I || !I->isBitwiseLogicOp() || Depth >= 3) + if (!I || !I->isBitwiseLogicOp() || (MaxAnalysisRecursionDepth - Depth) >= 3) return nullptr; if (!I->hasOneUse()) SimplifyOnly = true; Value *NewOp0 = simplifyAndOrWithOpReplaced(I->getOperand(0), Op, RepOp, - SimplifyOnly, IC, Depth + 1); + SimplifyOnly, IC, Depth - 1); Value *NewOp1 = simplifyAndOrWithOpReplaced(I->getOperand(1), Op, RepOp, - SimplifyOnly, IC, Depth + 1); + SimplifyOnly, IC, Depth - 1); if (!NewOp0 && !NewOp1) return nullptr; @@ -5082,10 +5082,10 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) { if (Instruction *Abs = canonicalizeAbs(I, Builder)) return Abs; - // Otherwise, if all else failed, try to hoist the xor-by-constant: - // (X ^ C) ^ Y --> (X ^ Y) ^ C - // Just like we do in other places, we completely avoid the fold - // for constantexprs, at least to avoid endless combine loop. + // Otherwise, if all else failed, try to hoist the xor-by-constant: + // (X ^ C) ^ Y --> (X ^ Y) ^ C + // Just like we do in other places, we completely avoid the fold + // for constantexprs, at least to avoid endless combine loop. if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X), m_Unless(m_ConstantExpr())), m_ImmConstant(C1))), diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index c112fae351817..443a29a6ba0f2 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -4385,12 +4385,12 @@ Instruction *InstCombinerImpl::foldSelectICmp(CmpPredicate Pred, SelectInst *SI, // Returns whether V is a Mask ((X + 1) & X == 0) or ~Mask (-Pow2OrZero) static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, - unsigned Depth = 0) { + int Depth = MaxAnalysisRecursionDepth) { if (Not ? 
match(V, m_NegatedPower2OrZero()) : match(V, m_LowBitMaskOrZero())) return true; if (V->getType()->getScalarSizeInBits() == 1) return true; - if (Depth++ >= MaxAnalysisRecursionDepth) + if (Depth-- <= 0) return false; Value *X; const Instruction *I = dyn_cast(V); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h index 334462d715f95..b2e43b755919d 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -199,19 +199,20 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy, const Twine &Suffix = ""); - KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, - FPClassTest Interested = fcAllFlags, - const Instruction *CtxI = nullptr, - unsigned Depth = 0) const { + KnownFPClass + computeKnownFPClass(Value *Val, FastMathFlags FMF, + FPClassTest Interested = fcAllFlags, + const Instruction *CtxI = nullptr, + int Depth = MaxAnalysisRecursionDepth) const { return llvm::computeKnownFPClass( Val, FMF, Interested, getSimplifyQuery().getWithInstruction(CtxI), Depth); } - KnownFPClass computeKnownFPClass(Value *Val, - FPClassTest Interested = fcAllFlags, - const Instruction *CtxI = nullptr, - unsigned Depth = 0) const { + KnownFPClass + computeKnownFPClass(Value *Val, FPClassTest Interested = fcAllFlags, + const Instruction *CtxI = nullptr, + int Depth = MaxAnalysisRecursionDepth) const { return llvm::computeKnownFPClass( Val, Interested, getSimplifyQuery().getWithInstruction(CtxI), Depth); } @@ -462,7 +463,11 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final /// If \p HasDereferenceable is true, the simplification will not perform /// same object checks. Value *simplifyNonNullOperand(Value *V, bool HasDereferenceable, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); + + Value *SimplifyDemandedVectorEltsInternal( + Value *V, APInt DemandedElts, APInt &PoisonElts, + bool AllowMultipleUsers = false, int Depth = MaxAnalysisRecursionDepth); public: /// Create and insert the idiom we use to indicate a block is unreachable @@ -559,12 +564,12 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final /// bits. Value *SimplifyDemandedUseBits(Instruction *I, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); using InstCombiner::SimplifyDemandedBits; bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, - unsigned Depth = 0) override; + int Depth = MaxAnalysisRecursionDepth) override; /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne /// bits. It also tries to handle simplifications that can be done based on @@ -573,7 +578,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, - unsigned Depth = 0); + int Depth = MaxAnalysisRecursionDepth); /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence. 
@@ -587,17 +592,17 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   bool SimplifyDemandedInstructionBits(Instruction &Inst, KnownBits &Known);
 
   Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
-                                    APInt &PoisonElts, unsigned Depth = 0,
+                                    APInt &PoisonElts,
                                     bool AllowMultipleUsers = false) override;
 
   /// Attempts to replace V with a simpler value based on the demanded
   /// floating-point classes
   Value *SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask,
                                     KnownFPClass &Known, Instruction *CxtI,
-                                    unsigned Depth = 0);
+                                    int Depth = MaxAnalysisRecursionDepth);
   bool SimplifyDemandedFPClass(Instruction *I, unsigned Op,
                                FPClassTest DemandedMask, KnownFPClass &Known,
-                               unsigned Depth = 0);
+                               int Depth = MaxAnalysisRecursionDepth);
 
   /// Common transforms for add / disjoint or
   Instruction *foldAddLikeCommutative(Value *LHS, Value *RHS, bool NSW,
@@ -769,7 +774,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
   Instruction *foldSelectValueEquivalence(SelectInst &SI, CmpInst &CI);
   bool replaceInInstruction(Value *V, Value *Old, Value *New,
-                            unsigned Depth = 0);
+                            int Depth = MaxAnalysisRecursionDepth);
 
   Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                          bool isSigned, bool Inside);
@@ -807,11 +812,13 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   /// actual instructions, otherwise return a non-null dummy value. Return
   /// nullptr on failure. Note, if DoFold is true the caller must ensure that
   /// takeLog2 will succeed, otherwise it may create stray instructions.
-  Value *takeLog2(Value *Op, unsigned Depth, bool AssumeNonZero, bool DoFold);
+  Value *takeLog2(Value *Op, int Depth, bool AssumeNonZero, bool DoFold);
 
   Value *tryGetLog2(Value *Op, bool AssumeNonZero) {
-    if (takeLog2(Op, /*Depth=*/0, AssumeNonZero, /*DoFold=*/false))
-      return takeLog2(Op, /*Depth=*/0, AssumeNonZero, /*DoFold=*/true);
+    if (takeLog2(Op, /*Depth=*/MaxAnalysisRecursionDepth, AssumeNonZero,
+                 /*DoFold=*/false))
+      return takeLog2(Op, /*Depth=*/MaxAnalysisRecursionDepth, AssumeNonZero,
+                      /*DoFold=*/true);
     return nullptr;
   }
 };
@@ -829,6 +836,8 @@ class Negator final {
 
   SmallDenseMap<Value *, Value *> NegationsCache;
 
+  int MaxDepth = 0;
+
   Negator(LLVMContext &C, const DataLayout &DL, const DominatorTree &DT,
           bool IsTrulyNegation);
 
@@ -842,9 +851,11 @@ class Negator final {
 
   std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);
 
-  [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW, unsigned Depth);
+  [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW,
+                                 int Depth = MaxAnalysisRecursionDepth);
 
-  [[nodiscard]] Value *negate(Value *V, bool IsNSW, unsigned Depth);
+  [[nodiscard]] Value *negate(Value *V, bool IsNSW,
+                              int Depth = MaxAnalysisRecursionDepth);
 
   /// Recurse depth-first and attempt to sink the negation.
   /// FIXME: use worklist?
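The call-site rewrites in the .cpp files below all follow one invariant: used plus remaining equals MaxAnalysisRecursionDepth. A compile-time sketch of that bookkeeping (depthUsed is an illustrative helper, not part of the patch):

    constexpr int MaxAnalysisRecursionDepth = 6;

    // Levels already consumed, given the remaining budget.
    constexpr int depthUsed(int Remaining) {
      return MaxAnalysisRecursionDepth - Remaining;
    }

    // A fresh walk has consumed nothing; a zero budget has consumed it all.
    static_assert(depthUsed(MaxAnalysisRecursionDepth) == 0, "fresh walk");
    static_assert(depthUsed(0) == MaxAnalysisRecursionDepth, "exhausted");

Hence the recurring patterns below: recursive calls pass Depth - 1 instead of Depth + 1, an ad-hoc cap such as Depth >= 3 becomes MaxAnalysisRecursionDepth - Depth >= 3, and "one level of budget left" is spelled /*Depth=*/1 rather than MaxAnalysisRecursionDepth - 1.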
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 324e6022f3f05..c916d8c4217b2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -987,7 +987,7 @@ static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
 
 Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
                                                 bool HasDereferenceable,
-                                                unsigned Depth) {
+                                                int Depth) {
   if (auto *Sel = dyn_cast<SelectInst>(V)) {
     if (isa<ConstantPointerNull>(Sel->getOperand(1)))
       return Sel->getOperand(2);
@@ -1000,13 +1000,13 @@ Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
     return nullptr;
 
-  constexpr unsigned RecursionLimit = 3;
-  if (Depth == RecursionLimit)
+  constexpr int RecursionLimit = 3;
+  if (MaxAnalysisRecursionDepth - Depth >= RecursionLimit)
     return nullptr;
 
   if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
     if (HasDereferenceable || GEP->isInBounds()) {
       if (auto *Res = simplifyNonNullOperand(GEP->getPointerOperand(),
-                                             HasDereferenceable, Depth + 1)) {
+                                             HasDereferenceable, Depth - 1)) {
         replaceOperand(*GEP, 0, Res);
         addToWorklist(GEP);
         return nullptr;
@@ -1018,8 +1018,7 @@ Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
     bool Changed = false;
     for (Use &U : PHI->incoming_values()) {
-      // We set Depth to RecursionLimit to avoid expensive recursion.
-      if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable,
-                                             RecursionLimit)) {
+      // We pass a remaining Depth of 0 to avoid expensive recursion.
+      if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable, 0)) {
         replaceUse(U, Res);
         Changed = true;
       }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 457199a72510e..d922dcbf80898 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -1513,7 +1513,7 @@ Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
   return nullptr;
 }
 
-Value *InstCombinerImpl::takeLog2(Value *Op, unsigned Depth, bool AssumeNonZero,
+Value *InstCombinerImpl::takeLog2(Value *Op, int Depth, bool AssumeNonZero,
                                   bool DoFold) {
   auto IfFold = [DoFold](function_ref<Value *()> Fn) {
     if (!DoFold)
@@ -1533,7 +1533,7 @@ Value *InstCombinerImpl::takeLog2(Value *Op, unsigned Depth, bool AssumeNonZero,
   });
 
   // The remaining tests are all recursive, so bail out if we hit the limit.
-  if (Depth++ == MaxAnalysisRecursionDepth)
+  if (Depth-- <= 0)
     return nullptr;
 
   // log2(zext X) -> zext log2(X)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index 2210336d92bf4..9347425660f32 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -83,7 +83,7 @@ static cl::opt<bool>
     NegatorEnabled("instcombine-negator-enabled", cl::init(true),
                    cl::desc("Should we attempt to sink negations?"));
 
-static cl::opt<unsigned>
+static cl::opt<int>
     NegatorMaxDepth("instcombine-negator-max-depth",
                     cl::init(NegatorDefaultMaxDepth),
                     cl::desc("What is the maximal lookup depth when trying to "
@@ -119,7 +119,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
 
 // FIXME: can this be reworked into a worklist-based algorithm while preserving
 // the depth-first, early bailout traversal?
-[[nodiscard]] Value *Negator::visitImpl(Value *V, bool IsNSW, unsigned Depth) {
+[[nodiscard]] Value *Negator::visitImpl(Value *V, bool IsNSW, int Depth) {
   // -(undef) -> undef.
   if (match(V, m_Undef()))
     return V;
@@ -289,7 +289,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   }
 
   // Rest of the logic is recursive, so if it's time to give up then it's time.
-  if (Depth > NegatorMaxDepth) {
+  if (Depth <= 0) {
     LLVM_DEBUG(dbgs() << "Negator: reached maximal allowed traversal depth in "
                       << *V << ". Giving up.\n");
     ++NegatorTimesDepthLimitReached;
@@ -299,7 +299,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   switch (I->getOpcode()) {
   case Instruction::Freeze: {
     // `freeze` is negatible if its operand is negatible.
-    Value *NegOp = negate(I->getOperand(0), IsNSW, Depth + 1);
+    Value *NegOp = negate(I->getOperand(0), IsNSW, Depth - 1);
     if (!NegOp) // Early return.
       return nullptr;
     return Builder.CreateFreeze(NegOp, I->getName() + ".neg");
@@ -313,7 +313,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
       if (DT.dominates(PHI->getParent(), std::get<0>(I)))
         return nullptr;
       if (!(std::get<1>(I) =
-                negate(std::get<0>(I), IsNSW, Depth + 1))) // Early return.
+                negate(std::get<0>(I), IsNSW, Depth - 1))) // Early return.
         return nullptr;
     }
     // All incoming values are indeed negatible. Create negated PHI node.
@@ -348,10 +348,10 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
       return NewSelect;
     }
     // `select` is negatible if both hands of `select` are negatible.
-    Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth + 1);
+    Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth - 1);
     if (!NegOp1) // Early return.
       return nullptr;
-    Value *NegOp2 = negate(I->getOperand(2), IsNSW, Depth + 1);
+    Value *NegOp2 = negate(I->getOperand(2), IsNSW, Depth - 1);
     if (!NegOp2)
       return nullptr;
     // Do preserve the metadata!
@@ -361,10 +361,10 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   case Instruction::ShuffleVector: {
     // `shufflevector` is negatible if both operands are negatible.
     auto *Shuf = cast<ShuffleVectorInst>(I);
-    Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1);
+    Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth - 1);
     if (!NegOp0) // Early return.
       return nullptr;
-    Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth + 1);
+    Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth - 1);
     if (!NegOp1)
       return nullptr;
     return Builder.CreateShuffleVector(NegOp0, NegOp1, Shuf->getShuffleMask(),
@@ -373,7 +373,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   case Instruction::ExtractElement: {
     // `extractelement` is negatible if source operand is negatible.
     auto *EEI = cast<ExtractElementInst>(I);
-    Value *NegVector = negate(EEI->getVectorOperand(), IsNSW, Depth + 1);
+    Value *NegVector = negate(EEI->getVectorOperand(), IsNSW, Depth - 1);
     if (!NegVector) // Early return.
       return nullptr;
     return Builder.CreateExtractElement(NegVector, EEI->getIndexOperand(),
@@ -383,10 +383,10 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     // `insertelement` is negatible if both the source vector and
     // element-to-be-inserted are negatible.
     auto *IEI = cast<InsertElementInst>(I);
-    Value *NegVector = negate(IEI->getOperand(0), IsNSW, Depth + 1);
+    Value *NegVector = negate(IEI->getOperand(0), IsNSW, Depth - 1);
     if (!NegVector) // Early return.
       return nullptr;
-    Value *NegNewElt = negate(IEI->getOperand(1), IsNSW, Depth + 1);
+    Value *NegNewElt = negate(IEI->getOperand(1), IsNSW, Depth - 1);
     if (!NegNewElt) // Early return.
       return nullptr;
     return Builder.CreateInsertElement(NegVector, NegNewElt, IEI->getOperand(2),
                                        I->getName() + ".neg");
@@ -394,7 +394,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   }
   case Instruction::Trunc: {
     // `trunc` is negatible if its operand is negatible.
-    Value *NegOp = negate(I->getOperand(0), /* IsNSW */ false, Depth + 1);
+    Value *NegOp = negate(I->getOperand(0), /* IsNSW */ false, Depth - 1);
     if (!NegOp) // Early return.
       return nullptr;
     return Builder.CreateTrunc(NegOp, I->getType(), I->getName() + ".neg");
@@ -402,7 +402,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   case Instruction::Shl: {
     // `shl` is negatible if the first operand is negatible.
     IsNSW &= I->hasNoSignedWrap();
-    if (Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1))
+    if (Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth - 1))
       return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg",
                                /* HasNUW */ false, IsNSW);
     // Otherwise, `shl %x, C` can be interpreted as `mul %x, 1<<C`.
@@ ... @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     SmallVector<Value *, 2> NegatedOps, NonNegatedOps;
     for (Value *Op : I->operands()) {
       // Can we sink the negation into this operand?
-      if (Value *NegOp = negate(Op, /* IsNSW */ false, Depth + 1)) {
+      if (Value *NegOp = negate(Op, /* IsNSW */ false, Depth - 1)) {
         NegatedOps.emplace_back(NegOp); // Successfully negated operand!
         continue;
       }
@@ -473,10 +473,10 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     Value *NegatedOp, *OtherOp;
     // First try the second operand, in case it's a constant it will be best to
     // just invert it instead of sinking the `neg` deeper.
-    if (Value *NegOp1 = negate(Ops[1], /* IsNSW */ false, Depth + 1)) {
+    if (Value *NegOp1 = negate(Ops[1], /* IsNSW */ false, Depth - 1)) {
       NegatedOp = NegOp1;
       OtherOp = Ops[0];
-    } else if (Value *NegOp0 = negate(Ops[0], /* IsNSW */ false, Depth + 1)) {
+    } else if (Value *NegOp0 = negate(Ops[0], /* IsNSW */ false, Depth - 1)) {
       NegatedOp = NegOp0;
       OtherOp = Ops[1];
     } else
@@ -492,8 +492,8 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
   llvm_unreachable("Can't get here. We always return from switch.");
 }
 
-[[nodiscard]] Value *Negator::negate(Value *V, bool IsNSW, unsigned Depth) {
-  NegatorMaxDepthVisited.updateMax(Depth);
+[[nodiscard]] Value *Negator::negate(Value *V, bool IsNSW, int Depth) {
+  NegatorMaxDepthVisited.updateMax(MaxDepth - Depth);
   ++NegatorNumValuesVisited;
 
 #if LLVM_ENABLE_STATS
@@ -531,7 +531,11 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
 
 [[nodiscard]] std::optional<Negator::Result> Negator::run(Value *Root,
                                                           bool IsNSW) {
-  Value *Negated = negate(Root, IsNSW, /*Depth=*/0);
+  MaxDepth =
+      NegatorMaxDepth.getNumOccurrences()
+          ? std::max(NegatorMaxDepth.getValue(), MaxAnalysisRecursionDepth)
+          : MaxAnalysisRecursionDepth;
+  Value *Negated = negate(Root, IsNSW, /*Depth=*/MaxDepth);
   if (!Negated) {
     // We must cleanup newly-inserted instructions, to avoid any potential
     // endless combine looping.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index d7d0431a5b8d0..7ae5def14c4ef 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1231,9 +1231,9 @@ static Value *canonicalizeSPF(ICmpInst &Cmp, Value *TrueVal, Value *FalseVal,
 }
 
 bool InstCombinerImpl::replaceInInstruction(Value *V, Value *Old, Value *New,
-                                            unsigned Depth) {
+                                            int Depth) {
   // Conservatively limit replacement to two instructions upwards.
-  if (Depth == 2)
+  if ((MaxAnalysisRecursionDepth - Depth) == 2)
     return false;
 
   assert(!isa<Constant>(Old) && "Only replace non-constant values");
@@ -1254,7 +1254,7 @@ bool InstCombinerImpl::replaceInInstruction(Value *V, Value *Old, Value *New,
       Worklist.add(I);
       Changed = true;
     } else {
-      Changed |= replaceInInstruction(U, Old, New, Depth + 1);
+      Changed |= replaceInInstruction(U, Old, New, Depth - 1);
     }
   }
   return Changed;
@@ -3640,8 +3640,8 @@ static bool matchFMulByZeroIfResultEqZero(InstCombinerImpl &IC, Value *Cmp0,
 /// Check whether the KnownBits of a select arm may be affected by the
 /// select condition.
 static bool hasAffectedValue(Value *V, SmallPtrSetImpl<Value *> &Affected,
-                             unsigned Depth) {
-  if (Depth == MaxAnalysisRecursionDepth)
+                             int Depth) {
+  if (Depth <= 0)
     return false;
 
   // Ignore the case where the select arm itself is affected. These cases
@@ -3651,13 +3651,13 @@ static bool hasAffectedValue(Value *V, SmallPtrSetImpl<Value *> &Affected,
 
   if (auto *I = dyn_cast<Instruction>(V)) {
     if (isa<PHINode>(I)) {
-      if (Depth == MaxAnalysisRecursionDepth - 1)
+      if (Depth == 1)
         return false;
-      Depth = MaxAnalysisRecursionDepth - 2;
+      Depth = 2;
     }
     return any_of(I->operands(), [&](Value *Op) {
       return Op->getType()->isIntOrIntVectorTy() &&
-             hasAffectedValue(Op, Affected, Depth + 1);
+             hasAffectedValue(Op, Affected, Depth - 1);
     });
   }
 
@@ -4120,7 +4120,6 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
       }
     }
   }
-
   // Try to simplify a binop sandwiched between 2 selects with the same
   // condition. This is not valid for div/rem because the select might be
   // preventing a division-by-zero.
@@ -4337,7 +4336,8 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
     SimplifyQuery Q = SQ.getWithInstruction(&SI).getWithCondContext(CC);
     if (!CC.AffectedValues.empty()) {
       if (!isa<Constant>(TrueVal) &&
-          hasAffectedValue(TrueVal, CC.AffectedValues, /*Depth=*/0)) {
+          hasAffectedValue(TrueVal, CC.AffectedValues,
+                           /*Depth=*/MaxAnalysisRecursionDepth)) {
         KnownBits Known = llvm::computeKnownBits(TrueVal, Q);
         if (Known.isConstant())
           return replaceOperand(SI, 1,
@@ -4346,7 +4346,8 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
 
       CC.Invert = true;
       if (!isa<Constant>(FalseVal) &&
-          hasAffectedValue(FalseVal, CC.AffectedValues, /*Depth=*/0)) {
+          hasAffectedValue(FalseVal, CC.AffectedValues,
+                           /*Depth=*/MaxAnalysisRecursionDepth)) {
         KnownBits Known = llvm::computeKnownBits(FalseVal, Q);
         if (Known.isConstant())
           return replaceOperand(SI, 2,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 0e3436d12702d..2d74305915d94 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -30,7 +30,7 @@ static cl::opt<bool>
                  "SimplifyDemandedBits() are consistent"),
     cl::Hidden, cl::init(false));
 
-static cl::opt<unsigned> SimplifyDemandedVectorEltsDepthLimit(
+static cl::opt<int> SimplifyDemandedVectorEltsDepthLimit(
     "instcombine-simplify-vector-elts-depth",
    cl::desc(
         "Depth limit when simplifying vector instructions and their operands"),
@@ -95,8 +95,7 @@ bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst) {
 bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                             const APInt &DemandedMask,
                                             KnownBits &Known,
-                                            const SimplifyQuery &Q,
-                                            unsigned Depth) {
+                                            const SimplifyQuery &Q, int Depth) {
   Use &U = I->getOperandUse(OpNo);
   Value *V = U.get();
   if (isa<Constant>(V)) {
@@ -117,7 +116,7 @@ bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
     return false;
   }
 
-  if (Depth == MaxAnalysisRecursionDepth)
+  if (Depth <= 0)
     return false;
 
   Value *NewVal;
@@ -165,9 +164,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
                                                  const APInt &DemandedMask,
                                                  KnownBits &Known,
                                                  const SimplifyQuery &Q,
-                                                 unsigned Depth) {
+                                                 int Depth) {
   assert(I != nullptr && "Null pointer of Value???");
-  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
+  assert(Depth >= 0 && "Invalid Search Depth");
   uint32_t BitWidth = DemandedMask.getBitWidth();
   Type *VTy = I->getType();
   assert(
@@ -199,9 +198,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     // significant bit and all those below it.
     DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ);
     if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
-        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Q, Depth + 1) ||
+        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Q, Depth - 1) ||
         ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
-        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth + 1)) {
+        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth - 1)) {
       disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
       return true;
     }
@@ -214,9 +213,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     break;
   case Instruction::And: {
     // If either the LHS or the RHS are Zero, the result is zero.
-    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth + 1) ||
+    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth - 1) ||
         SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown, Q,
-                             Depth + 1))
+                             Depth - 1))
       return I;
 
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
@@ -242,9 +241,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
   }
   case Instruction::Or: {
     // If either the LHS or the RHS are One, the result is One.
-    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth + 1) ||
+    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth - 1) ||
         SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown, Q,
-                             Depth + 1)) {
+                             Depth - 1)) {
       // Disjoint flag may no longer hold.
       I->dropPoisonGeneratingFlags();
       return I;
@@ -282,8 +281,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     break;
   }
   case Instruction::Xor: {
-    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth + 1) ||
-        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Q, Depth + 1))
+    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth - 1) ||
+        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Q, Depth - 1))
       return I;
     Value *LHS, *RHS;
     if (DemandedMask == 1 &&
@@ -374,8 +373,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     break;
   }
   case Instruction::Select: {
-    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Q, Depth + 1) ||
-        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Q, Depth + 1))
+    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Q, Depth - 1) ||
+        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Q, Depth - 1))
      return I;
 
     // If the operands are constants, see if we can simplify them.
@@ -447,7 +446,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
     KnownBits InputKnown(SrcBitWidth);
     if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Q,
-                             Depth + 1)) {
+                             Depth - 1)) {
       // For zext nneg, we may have dropped the instruction which made the
       // input non-negative.
I->dropPoisonGeneratingFlags(); @@ -473,7 +472,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, InputDemandedBits.setBit(SrcBitWidth-1); KnownBits InputKnown(SrcBitWidth); - if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Q, Depth + 1)) + if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Q, Depth - 1)) return I; // If the input sign bit is known zero, or if the NewBits are not demanded @@ -534,7 +533,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, unsigned NLZ = DemandedMask.countl_zero(); APInt DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ); if (ShrinkDemandedConstant(I, 1, DemandedFromOps) || - SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth + 1)) + SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth - 1)) return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); // If low order bits are not demanded and known to be zero in one operand, @@ -544,7 +543,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, APInt DemandedFromLHS = DemandedFromOps; DemandedFromLHS.clearLowBits(NTZ); if (ShrinkDemandedConstant(I, 0, DemandedFromLHS) || - SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Q, Depth + 1)) + SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Q, Depth - 1)) return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); // If we are known to be adding zeros to every bit below @@ -577,7 +576,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, unsigned NLZ = DemandedMask.countl_zero(); APInt DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ); if (ShrinkDemandedConstant(I, 1, DemandedFromOps) || - SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth + 1)) + SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth - 1)) return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); // If low order bits are not demanded and are known to be zero in RHS, @@ -587,7 +586,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, APInt DemandedFromLHS = DemandedFromOps; DemandedFromLHS.clearLowBits(NTZ); if (ShrinkDemandedConstant(I, 0, DemandedFromLHS) || - SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Q, Depth + 1)) + SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Q, Depth - 1)) return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); // If we are known to be subtracting zeros from every bit below @@ -675,7 +674,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, if (I->hasNoSignedWrap()) { unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero(); unsigned SignBits = - ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth + 1); + ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth - 1); if (SignBits > ShiftAmt && SignBits - ShiftAmt >= NumHiDemandedBits) return I->getOperand(0); } @@ -707,7 +706,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, else if (IOp->hasNoUnsignedWrap()) DemandedMaskIn.setHighBits(ShiftAmt); - if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth + 1)) + if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth - 1)) return I; Known = KnownBits::shl(Known, @@ -720,7 +719,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, // demanding those bits from the pre-shifted operand either. 
     if (unsigned CTLZ = DemandedMask.countl_zero()) {
       APInt DemandedFromOp(APInt::getLowBitsSet(BitWidth, BitWidth - CTLZ));
-      if (SimplifyDemandedBits(I, 0, DemandedFromOp, Known, Q, Depth + 1)) {
+      if (SimplifyDemandedBits(I, 0, DemandedFromOp, Known, Q, Depth - 1)) {
         // We can't guarantee that nsw/nuw hold after simplifying the operand.
         I->dropPoisonGeneratingFlags();
         return I;
@@ -757,7 +756,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       // need to shift.
       unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero();
       unsigned SignBits =
-          ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth + 1);
+          ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth - 1);
       if (SignBits >= NumHiDemandedBits)
         return I->getOperand(0);
 
@@ -790,7 +789,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 
       // Unsigned shift right.
       APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth + 1)) {
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth - 1)) {
         // exact flag may no longer hold.
         I->dropPoisonGeneratingFlags();
         return I;
@@ -805,7 +804,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     break;
   }
   case Instruction::AShr: {
-    unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth + 1);
+    unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth - 1);
 
     // If we only want bits that already match the signbit then we don't need
     // to shift.
@@ -835,7 +834,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       bool ShiftedInBitsDemanded = DemandedMask.countl_zero() < ShiftAmt;
       if (ShiftedInBitsDemanded)
         DemandedMaskIn.setSignBit();
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth + 1)) {
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth - 1)) {
         // exact flag may no longer hold.
         I->dropPoisonGeneratingFlags();
         return I;
@@ -867,7 +866,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       unsigned RHSTrailingZeros = SA->countr_zero();
       APInt DemandedMaskIn =
          APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Q, Depth + 1)) {
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Q, Depth - 1)) {
        // We can't guarantee that "exact" is still true after changing the
        // dividend.
         I->dropPoisonGeneratingFlags();
@@ -889,7 +888,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       APInt LowBits = *Rem - 1;
       APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
-      if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Q, Depth + 1))
+      if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Q, Depth - 1))
         return I;
       Known = KnownBits::srem(LHSKnown, KnownBits::makeConstant(*Rem));
       break;
@@ -951,10 +950,10 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     unsigned MaskWidth = I->getOperand(1)->getType()->getScalarSizeInBits();
     RHSKnown = KnownBits(MaskWidth);
     // If either the LHS or the RHS are Zero, the result is zero.
- if (SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Q, Depth + 1) || + if (SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Q, Depth - 1) || SimplifyDemandedBits( I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth), - RHSKnown, Q, Depth + 1)) + RHSKnown, Q, Depth - 1)) return I; // TODO: Should be 1-extend @@ -995,7 +994,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, m_PtrAdd(m_Value(InnerPtr), m_ConstantInt(GEPIndex)), m_ConstantInt(PtrMaskImmediate)))) { - LHSKnown = computeKnownBits(InnerPtr, I, Depth + 1); + LHSKnown = computeKnownBits(InnerPtr, I, Depth - 1); if (!LHSKnown.isZero()) { const unsigned trailingZeros = LHSKnown.countMinTrailingZeros(); uint64_t PointerAlignBits = (uint64_t(1) << trailingZeros) - 1; @@ -1041,9 +1040,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt)); if (I->getOperand(0) != I->getOperand(1)) { if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Q, - Depth + 1) || + Depth - 1) || SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Q, - Depth + 1)) { + Depth - 1)) { // Range attribute may no longer hold. I->dropPoisonGeneratingReturnAttributes(); return I; @@ -1051,14 +1050,14 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, } else { // fshl is a rotate // Avoid converting rotate into funnel shift. // Only simplify if one operand is constant. - LHSKnown = computeKnownBits(I->getOperand(0), I, Depth + 1); + LHSKnown = computeKnownBits(I->getOperand(0), I, Depth - 1); if (DemandedMaskLHS.isSubsetOf(LHSKnown.Zero | LHSKnown.One) && !match(I->getOperand(0), m_SpecificInt(LHSKnown.One))) { replaceOperand(*I, 0, Constant::getIntegerValue(VTy, LHSKnown.One)); return I; } - RHSKnown = computeKnownBits(I->getOperand(1), I, Depth + 1); + RHSKnown = computeKnownBits(I->getOperand(1), I, Depth - 1); if (DemandedMaskRHS.isSubsetOf(RHSKnown.Zero | RHSKnown.One) && !match(I->getOperand(1), m_SpecificInt(RHSKnown.One))) { replaceOperand(*I, 1, Constant::getIntegerValue(VTy, RHSKnown.One)); @@ -1145,7 +1144,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, /// DemandedMask, but without modifying the Instruction. Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits( Instruction *I, const APInt &DemandedMask, KnownBits &Known, - const SimplifyQuery &Q, unsigned Depth) { + const SimplifyQuery &Q, int Depth) { unsigned BitWidth = DemandedMask.getBitWidth(); Type *ITy = I->getType(); @@ -1158,8 +1157,8 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits( // this instruction has a simpler value in that context. 
   switch (I->getOpcode()) {
   case Instruction::And: {
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth - 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth - 1);
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
                                          Q, Depth);
     computeKnownBitsFromContext(I, Known, Q, Depth);
@@ -1179,8 +1178,8 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
     break;
   }
   case Instruction::Or: {
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth - 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth - 1);
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
                                          Q, Depth);
     computeKnownBitsFromContext(I, Known, Q, Depth);
@@ -1202,8 +1201,8 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
     break;
   }
   case Instruction::Xor: {
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth - 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth - 1);
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
                                          Q, Depth);
     computeKnownBitsFromContext(I, Known, Q, Depth);
@@ -1229,11 +1228,11 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
 
     // If an operand adds zeros to every bit below the highest demanded bit,
     // that operand doesn't change the result. Return the other side.
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth - 1);
     if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
       return I->getOperand(0);
 
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth - 1);
     if (DemandedFromOps.isSubsetOf(LHSKnown.Zero))
       return I->getOperand(1);
 
@@ -1249,13 +1248,13 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
 
     // If an operand subtracts zeros from every bit below the highest demanded
     // bit, that operand doesn't change the result. Return the other side.
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth - 1);
     if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
       return I->getOperand(0);
 
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth - 1);
     Known = KnownBits::sub(LHSKnown, RHSKnown, NSW, NUW);
     computeKnownBitsFromContext(I, Known, Q, Depth);
     break;
@@ -1397,8 +1396,15 @@ Value *InstCombinerImpl::simplifyShrShlDemandedBits(
 Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
                                                     APInt DemandedElts,
                                                     APInt &PoisonElts,
-                                                    unsigned Depth,
-                                                    bool AllowMultipleUsers) {
+                                                    bool AllowMultipleUsers) {
+  return SimplifyDemandedVectorEltsInternal(
+      V, DemandedElts, PoisonElts, AllowMultipleUsers,
+      SimplifyDemandedVectorEltsDepthLimit.getValue());
+}
+
+Value *InstCombinerImpl::SimplifyDemandedVectorEltsInternal(
+    Value *V, APInt DemandedElts, APInt &PoisonElts, bool AllowMultipleUsers,
+    int Depth) {
   // Cannot analyze scalable type. The number of vector elements is not a
   // compile-time constant.
   if (isa<ScalableVectorType>(V->getType()))
@@ -1451,8 +1457,8 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
   }
 
   // Limit search depth.
-  if (Depth == SimplifyDemandedVectorEltsDepthLimit)
+  if (Depth <= 0)
     return nullptr;
 
   if (!AllowMultipleUsers) {
     // If multiple users are using the root value, proceed with
@@ -1462,7 +1469,7 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
     // Quit if we find multiple users of a non-root value though.
     // They'll be handled when it's their turn to be visited by
     // the main instcombine process.
-    if (Depth != 0)
+    if (Depth < SimplifyDemandedVectorEltsDepthLimit.getValue())
       // TODO: Just compute the PoisonElts information recursively.
       return nullptr;
 
@@ -1470,7 +1477,6 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
     DemandedElts = EltMask;
     }
   }
-
   Instruction *I = dyn_cast<Instruction>(V);
   if (!I) return nullptr; // Only analyze instructions.
@@ -1479,7 +1485,8 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
                                  APInt Demanded, APInt &Undef) {
     auto *II = dyn_cast<IntrinsicInst>(Inst);
     Value *Op = II ? II->getArgOperand(OpNum) : Inst->getOperand(OpNum);
-    if (Value *V = SimplifyDemandedVectorElts(Op, Demanded, Undef, Depth + 1)) {
+    if (Value *V = SimplifyDemandedVectorEltsInternal(
+            Op, Demanded, Undef, /*AllowMultipleUsers=*/false, Depth - 1)) {
       replaceOperand(*Inst, OpNum, V);
       MadeChange = true;
     }
@@ -1969,8 +1976,8 @@
 Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V,
                                                     FPClassTest DemandedMask,
                                                     KnownFPClass &Known,
                                                     Instruction *CxtI,
-                                                    unsigned Depth) {
-  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
+                                                    int Depth) {
+  assert(Depth >= 0 && "Invalid Search Depth");
   Type *VTy = V->getType();
 
   assert(Known == KnownFPClass() && "expected uninitialized state");
@@ -1978,13 +1985,13 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V,
   if (DemandedMask == fcNone)
     return isa<UndefValue>(V) ? nullptr : PoisonValue::get(VTy);
 
-  if (Depth == MaxAnalysisRecursionDepth)
+  if (Depth <= 0)
     return nullptr;
 
   Instruction *I = dyn_cast<Instruction>(V);
   if (!I) {
     // Handle constants and arguments
-    Known = computeKnownFPClass(V, fcAllFlags, CxtI, Depth + 1);
+    Known = computeKnownFPClass(V, fcAllFlags, CxtI, Depth - 1);
     Value *FoldedToConst =
         getFPClassConstant(VTy, DemandedMask & Known.KnownFPClasses);
     return FoldedToConst == V ?
nullptr : FoldedToConst; @@ -2002,7 +2009,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V, switch (I->getOpcode()) { case Instruction::FNeg: { if (SimplifyDemandedFPClass(I, 0, llvm::fneg(DemandedMask), Known, - Depth + 1)) + Depth - 1)) return I; Known.fneg(); break; @@ -2012,18 +2019,18 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V, switch (CI->getIntrinsicID()) { case Intrinsic::fabs: if (SimplifyDemandedFPClass(I, 0, llvm::inverse_fabs(DemandedMask), Known, - Depth + 1)) + Depth - 1)) return I; Known.fabs(); break; case Intrinsic::arithmetic_fence: - if (SimplifyDemandedFPClass(I, 0, DemandedMask, Known, Depth + 1)) + if (SimplifyDemandedFPClass(I, 0, DemandedMask, Known, Depth - 1)) return I; break; case Intrinsic::copysign: { // Flip on more potentially demanded classes const FPClassTest DemandedMaskAnySign = llvm::unknown_sign(DemandedMask); - if (SimplifyDemandedFPClass(I, 0, DemandedMaskAnySign, Known, Depth + 1)) + if (SimplifyDemandedFPClass(I, 0, DemandedMaskAnySign, Known, Depth - 1)) return I; if ((DemandedMask & fcNegative) == DemandedMask) { @@ -2039,12 +2046,12 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V, } KnownFPClass KnownSign = - computeKnownFPClass(I->getOperand(1), fcAllFlags, CxtI, Depth + 1); + computeKnownFPClass(I->getOperand(1), fcAllFlags, CxtI, Depth - 1); Known.copysign(KnownSign); break; } default: - Known = computeKnownFPClass(I, ~DemandedMask, CxtI, Depth + 1); + Known = computeKnownFPClass(I, ~DemandedMask, CxtI, Depth - 1); break; } @@ -2052,8 +2059,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V, } case Instruction::Select: { KnownFPClass KnownLHS, KnownRHS; - if (SimplifyDemandedFPClass(I, 2, DemandedMask, KnownRHS, Depth + 1) || - SimplifyDemandedFPClass(I, 1, DemandedMask, KnownLHS, Depth + 1)) + if (SimplifyDemandedFPClass(I, 2, DemandedMask, KnownRHS, Depth - 1) || + SimplifyDemandedFPClass(I, 1, DemandedMask, KnownLHS, Depth - 1)) return I; if (KnownLHS.isKnownNever(DemandedMask)) @@ -2066,7 +2073,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V, break; } default: - Known = computeKnownFPClass(I, ~DemandedMask, CxtI, Depth + 1); + Known = computeKnownFPClass(I, ~DemandedMask, CxtI, Depth - 1); break; } @@ -2075,8 +2082,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V, bool InstCombinerImpl::SimplifyDemandedFPClass(Instruction *I, unsigned OpNo, FPClassTest DemandedMask, - KnownFPClass &Known, - unsigned Depth) { + KnownFPClass &Known, int Depth) { Use &U = I->getOperandUse(OpNo); Value *NewVal = SimplifyDemandedUseFPClass(U.get(), DemandedMask, Known, I, Depth); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index f946c3856948b..273a12cf75fe5 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -600,9 +600,9 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) { APInt DemandedElts = findDemandedEltsByAllUsers(SrcVec); if (!DemandedElts.isAllOnes()) { APInt PoisonElts(NumElts, 0); - if (Value *V = SimplifyDemandedVectorElts( - SrcVec, DemandedElts, PoisonElts, 0 /* Depth */, - true /* AllowMultipleUsers */)) { + if (Value *V = + SimplifyDemandedVectorElts(SrcVec, DemandedElts, PoisonElts, + /*AllowMultipleUsers=*/true)) { if (V != SrcVec) { Worklist.addValue(SrcVec); SrcVec->replaceAllUsesWith(V); diff --git 
a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 439a86d951a83..1a9851ca918cc 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2580,7 +2580,7 @@ Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
 
 Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
                                            BuilderTy *Builder,
-                                           bool &DoesConsume, unsigned Depth) {
+                                           bool &DoesConsume, int Depth) {
   static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
   // ~(~(X)) -> X.
   Value *A, *B;
@@ -2594,7 +2594,7 @@ Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
   if (match(V, m_ImmConstant(C)))
     return ConstantExpr::getNot(C);
 
-  if (Depth++ >= MaxAnalysisRecursionDepth)
+  if (Depth-- <= 0)
     return nullptr;
 
   // The rest of the cases require that we invert all uses so don't bother
@@ -2686,9 +2686,10 @@ Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
     SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
     for (Use &U : PN->operands()) {
       BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
-      Value *NewIncomingVal = getFreelyInvertedImpl(
-          U.get(), /*WillInvertAllUses=*/false,
-          /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
+      Value *NewIncomingVal =
+          getFreelyInvertedImpl(U.get(), /*WillInvertAllUses=*/false,
+                                /*Builder=*/nullptr, LocalDoesConsume,
+                                /*Depth=*/1);
       if (NewIncomingVal == nullptr)
         return nullptr;
       // Make sure that we can safely erase the original PHI node.
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index cbad5dd357687..6eb2297fedc46 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -822,8 +822,10 @@ ConstraintTy ConstraintInfo::getConstraintForSolving(CmpInst::Predicate Pred,
   // unsigned ones. This increases the reasoning effectiveness in combination
   // with the signed <-> unsigned transfer logic.
   if (CmpInst::isSigned(Pred) &&
-      isKnownNonNegative(Op0, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1) &&
-      isKnownNonNegative(Op1, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
+      isKnownNonNegative(Op0, DL,
+                         /*Depth=*/1) &&
+      isKnownNonNegative(Op1, DL,
+                         /*Depth=*/1))
     Pred = ICmpInst::getUnsignedPredicate(Pred);
 
   SmallVector<Value *> NewVariables;
@@ -896,7 +898,8 @@ void ConstraintInfo::transferToOtherSystem(
     unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack) {
   auto IsKnownNonNegative = [this](Value *V) {
     return doesHold(CmpInst::ICMP_SGE, V, ConstantInt::get(V->getType(), 0)) ||
-           isKnownNonNegative(V, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1);
+           isKnownNonNegative(V, DL,
+                              /*Depth=*/1);
   };
   // Check if we can combine facts from the signed and unsigned systems to
   // derive additional facts.
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 320b79203c0b3..5079b2b1c51bd 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -631,8 +631,9 @@ APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
     ConstantOffset = CI->getValue();
   } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
     // Trace into subexpressions for more hoisting opportunities.
-    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
+    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) {
       ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
+    }
   } else if (isa<TruncInst>(V)) {
     ConstantOffset =
         find(U->getOperand(0), SignExtended, ZeroExtended, NonNegative)
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index e23005b60891d..a79c013c9989c 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -3261,7 +3261,8 @@ TEST_F(ValueTrackingTest, ComputeConstantRange) {
 
     // Check the depth cutoff results in a conservative result (full set) by
-    // passing Depth == MaxDepth == 6.
-    ConstantRange CR3 = computeConstantRange(X2, false, true, &AC, I, nullptr, 6);
+    // passing Depth == 0, i.e. no remaining recursion budget.
+    ConstantRange CR3 =
+        computeConstantRange(X2, false, true, &AC, I, nullptr, 0);
     EXPECT_TRUE(CR3.isFullSet());
   }
   {
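For callers, the net effect of the flip can be summarized against the updated ValueTracking entry points shown at the top of this patch (visitFoo is a made-up caller; only the computeKnownBits signature from this diff is assumed):

    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical caller against the post-patch signatures.
    void visitFoo(const Value *V, const SimplifyQuery &Q) {
      // Omitting Depth starts with the full budget (MaxAnalysisRecursionDepth).
      KnownBits Full = computeKnownBits(V, Q);
      // Passing 0 means "no recursion left" and yields the conservative
      // answer, mirroring the updated unit test above, which passes 0 to
      // computeConstantRange to model an exhausted budget.
      KnownBits Conservative = computeKnownBits(V, Q, /*Depth=*/0);
      (void)Full;
      (void)Conservative;
    }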