@@ -800,8 +800,13 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
                     Value *Ptr, PredicatedScalarEvolution &PSE) {
   // The access function must stride over the innermost loop.
   if (Lp != AR->getLoop()) {
-    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
-                      << *Ptr << " SCEV: " << *AR << "\n");
+    LLVM_DEBUG({
+      dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
+      if (Ptr)
+        dbgs() << *Ptr << " ";
+
+      dbgs() << "SCEV: " << *AR << "\n";
+    });
     return std::nullopt;
   }
 
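The braced form of LLVM_DEBUG used above wraps an arbitrary statement block, which is what lets the added null check guard the dereference of Ptr before it is streamed. A minimal standalone sketch of that pattern, assuming the usual DEBUG_TYPE and Debug.h setup; the helper name is illustrative and not part of LAA:

```cpp
// Sketch only: DEBUG_TYPE must be defined before including Debug.h so that
// LLVM_DEBUG knows which debug category to filter on.
#define DEBUG_TYPE "loop-accesses"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper mirroring the debug output in the hunk above.
static void emitBadStrideDebug(const llvm::Value *Ptr) {
  LLVM_DEBUG({
    llvm::dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
    if (Ptr) // Ptr may now be null, so only stream it when it is present.
      llvm::dbgs() << *Ptr << " ";
    llvm::dbgs() << "\n";
  });
}
```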
@@ -811,8 +816,12 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
   // Calculate the pointer stride and check if it is constant.
   const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
   if (!C) {
-    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
-                      << " SCEV: " << *AR << "\n");
+    LLVM_DEBUG({
+      dbgs() << "LAA: Bad stride - Not a constant strided ";
+      if (Ptr)
+        dbgs() << *Ptr << " ";
+      dbgs() << "SCEV: " << *AR << "\n";
+    });
     return std::nullopt;
   }
 
@@ -839,8 +848,8 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
 static bool isNoWrapGEP(Value *Ptr, PredicatedScalarEvolution &PSE,
                         const Loop *L);
 
-/// Check whether \p AR is a non-wrapping AddRec, or if \p Ptr is a non-wrapping
-/// GEP.
+/// Check whether a pointer address cannot wrap. If \p Ptr is not nullptr, use
+/// information from the IR pointer value to determine no-wrap.
 static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
                      Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
                      std::optional<int64_t> Stride = std::nullopt) {
@@ -861,7 +870,7 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
   // location will be larger than half the pointer index type space. In that
   // case, the GEP would be poison and any memory access dependent on it would
   // be immediate UB when executed.
-  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+  if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
       GEP && GEP->hasNoUnsignedSignedWrap())
     return true;
 
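The switch from dyn_cast to dyn_cast_if_present (both from llvm/Support/Casting.h) is what makes the GEP check safe in the new null-Ptr case: dyn_cast asserts on a null input, while dyn_cast_if_present simply yields null, so the condition falls through. A small sketch under that assumption; the helper name is illustrative:

```cpp
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

// Hypothetical helper: true only when an IR GEP is available and carries the
// nusw (no-unsigned-signed-wrap) flag.
static bool gepKnownNoWrap(llvm::Value *Ptr) {
  // Safe for Ptr == nullptr: dyn_cast_if_present returns null instead of
  // asserting, so a missing pointer value just means "not known here".
  if (auto *GEP = llvm::dyn_cast_if_present<llvm::GetElementPtrInst>(Ptr))
    return GEP->hasNoUnsignedSignedWrap();
  return false;
}
```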
@@ -877,6 +886,9 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
     return true;
   }
 
+  if (!Ptr)
+    return false;
+
   if (Assume) {
     PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
     LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
@@ -1144,13 +1156,10 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
 
     // When we run after a failing dependency check we have to make sure
     // we don't have wrapping pointers.
-    if (ShouldCheckWrap) {
-      // Skip wrap checking when translating pointers.
-      if (TranslatedPtrs.size() > 1)
-        return false;
-
-      if (!isNoWrap(PSE, AR, Ptr, AccessTy, TheLoop, Assume))
-        return false;
+    if (ShouldCheckWrap &&
+        !isNoWrap(PSE, AR, TranslatedPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
+                  TheLoop, Assume)) {
+      return false;
     }
   }
 
@@ -1457,6 +1466,9 @@ void AccessAnalysis::processMemAccesses() {
 /// Check whether \p Ptr is non-wrapping GEP.
 static bool isNoWrapGEP(Value *Ptr, PredicatedScalarEvolution &PSE,
                         const Loop *L) {
+  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
+    return true;
+
   // Scalar evolution does not propagate the non-wrapping flags to values that
   // are derived from a non-wrapping induction variable because non-wrapping
   // could be flow-sensitive.
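The early return added to isNoWrapGEP reads back the same IncrementNUSW wrap predicate that isNoWrap records through PSE.setNoOverflow when Assume is set, so a pointer that already carries the predicate does not need the GEP-based reasoning below it. A minimal sketch of that pairing, assuming a PredicatedScalarEvolution already set up for the loop; the helper name is illustrative:

```cpp
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Value.h"

// Hypothetical helper: once an IncrementNUSW predicate has been registered
// for Ptr (for example by isNoWrap under Assume), hasNoOverflow reports it
// and the caller can skip re-deriving the no-wrap fact from the IR.
static bool noWrapAlreadyAssumed(llvm::PredicatedScalarEvolution &PSE,
                                 llvm::Value *Ptr) {
  return PSE.hasNoOverflow(Ptr, llvm::SCEVWrapPredicate::IncrementNUSW);
}
```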