
Conversation

skachkov-sc (Contributor) commented on May 20, 2025

llvmbot added the llvm:analysis (Includes value tracking, cost tables and constant folding) label on May 20, 2025
llvmbot (Member) commented on May 20, 2025

@llvm/pr-subscribers-llvm-analysis

Author: Sergey Kachkov (skachkov-sc)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/140721.diff

2 Files Affected:

  • (modified) llvm/lib/Analysis/LoopAccessAnalysis.cpp (+19-4)
  • (added) llvm/test/Analysis/LoopAccessAnalysis/monotonic-pointers.ll (+108)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 2a322a69a0dbf..b5f3b7ea490e0 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -23,6 +23,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/IVDescriptors.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopIterator.h"
@@ -854,7 +855,8 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
   if (AR->getNoWrapFlags(SCEV::NoWrapMask))
     return true;
 
-  if (Ptr && PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
+  if (Ptr && isa<SCEVAddRecExpr>(PSE.getSCEV(Ptr)) &&
+      PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
     return true;
 
   // The address calculation must not wrap. Otherwise, a dependence could be
@@ -1088,6 +1090,20 @@ static void findForkedSCEVs(
   }
 }
 
+// Conservatively replace SCEV of Ptr value if it can't be computed directly,
+// e.g. for monotonic values (they can be treated as affine AddRecs that are
+// updated under some predicate).
+static const SCEV *
+replacePtrSCEV(PredicatedScalarEvolution &PSE, Value *Ptr,
+               const DenseMap<Value *, const SCEV *> &StridesMap,
+               const Loop *L) {
+  ScalarEvolution *SE = PSE.getSE();
+  if (MonotonicDescriptor MD;
+      MonotonicDescriptor::isMonotonicVal(Ptr, L, MD, *SE))
+    return MD.getExpr();
+  return replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
+}
+
 static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
 findForkedPointer(PredicatedScalarEvolution &PSE,
                   const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
@@ -1110,7 +1126,7 @@ findForkedPointer(PredicatedScalarEvolution &PSE,
     return Scevs;
   }
 
-  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
+  return {{replacePtrSCEV(PSE, Ptr, StridesMap, L), false}};
 }
 
 bool AccessAnalysis::createCheckForAccess(
@@ -1141,8 +1157,7 @@ bool AccessAnalysis::createCheckForAccess(
     // If there's only one option for Ptr, look it up after bounds and wrap
     // checking, because assumptions might have been added to PSE.
     if (TranslatedPtrs.size() == 1) {
-      AR =
-          cast<SCEVAddRecExpr>(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr));
+      AR = cast<SCEVAddRecExpr>(replacePtrSCEV(PSE, Ptr, StridesMap, TheLoop));
       P.setPointer(AR);
     }
 
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/monotonic-pointers.ll b/llvm/test/Analysis/LoopAccessAnalysis/monotonic-pointers.ll
new file mode 100644
index 0000000000000..7956240529fd2
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/monotonic-pointers.ll
@@ -0,0 +1,108 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -disable-output -passes='print<access-info>' %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define void @monotonic_ptr_simple(ptr writeonly %dst, ptr readonly %src, i32 %c, i32 %n) {
+; CHECK-LABEL: 'monotonic_ptr_simple'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe with run-time checks
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Check 0:
+; CHECK-NEXT:        Comparing group ([[GRP1:0x[0-9a-f]+]]):
+; CHECK-NEXT:          %dst.addr.09 = phi ptr [ %dst, %entry ], [ %dst.addr.1, %for.inc ]
+; CHECK-NEXT:        Against group ([[GRP2:0x[0-9a-f]+]]):
+; CHECK-NEXT:          %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-NEXT:        Group [[GRP1]]:
+; CHECK-NEXT:          (Low: %dst High: ((4 * (zext i32 %n to i64))<nuw><nsw> + %dst))
+; CHECK-NEXT:            Member: {%dst,+,4}<nsw><%for.body>
+; CHECK-NEXT:        Group [[GRP2]]:
+; CHECK-NEXT:          (Low: %src High: ((4 * (zext i32 %n to i64))<nuw><nsw> + %src))
+; CHECK-NEXT:            Member: {%src,+,4}<nuw><%for.body>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+entry:
+  %wide.trip.count = zext nneg i32 %n to i64
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %dst.addr.09 = phi ptr [ %dst, %entry ], [ %dst.addr.1, %for.inc ]
+  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
+  %0 = load i32, ptr %arrayidx, align 4
+  %cmp1 = icmp slt i32 %0, %c
+  br i1 %cmp1, label %if.then, label %for.inc
+
+if.then:
+  %incdec.ptr = getelementptr inbounds i8, ptr %dst.addr.09, i64 4
+  store i32 %0, ptr %dst.addr.09, align 4
+  br label %for.inc
+
+for.inc:
+  %dst.addr.1 = phi ptr [ %incdec.ptr, %if.then ], [ %dst.addr.09, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @monotonic_ptr_indexed(ptr writeonly %dst, ptr readonly %src, i32 %c, i32 %n) {
+; CHECK-LABEL: 'monotonic_ptr_indexed'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe with run-time checks
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Check 0:
+; CHECK-NEXT:        Comparing group ([[GRP3:0x[0-9a-f]+]]):
+; CHECK-NEXT:          %arrayidx5 = getelementptr inbounds i32, ptr %dst, i64 %idxprom4
+; CHECK-NEXT:        Against group ([[GRP4:0x[0-9a-f]+]]):
+; CHECK-NEXT:          %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-NEXT:        Group [[GRP3]]:
+; CHECK-NEXT:          (Low: %dst High: ((4 * (zext i32 %n to i64))<nuw><nsw> + %dst))
+; CHECK-NEXT:            Member: {%dst,+,4}<%for.body>
+; CHECK-NEXT:        Group [[GRP4]]:
+; CHECK-NEXT:          (Low: %src High: ((4 * (zext i32 %n to i64))<nuw><nsw> + %src))
+; CHECK-NEXT:            Member: {%src,+,4}<nuw><%for.body>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+entry:
+  %wide.trip.count = zext nneg i32 %n to i64
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %idx.012 = phi i32 [ 0, %entry ], [ %idx.1, %for.inc ]
+  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
+  %0 = load i32, ptr %arrayidx, align 4
+  %cmp1 = icmp slt i32 %0, %c
+  br i1 %cmp1, label %if.then, label %for.inc
+
+if.then:
+  %inc = add nsw i32 %idx.012, 1
+  %idxprom4 = sext i32 %idx.012 to i64
+  %arrayidx5 = getelementptr inbounds i32, ptr %dst, i64 %idxprom4
+  store i32 %0, ptr %arrayidx5, align 4
+  br label %for.inc
+
+for.inc:
+  %idx.1 = phi i32 [ %inc, %if.then ], [ %idx.012, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
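
For context, both new tests exercise a "compress"-style loop in which the destination pointer (or index) only advances when a predicate holds; this is the monotonic-pointer shape the patch teaches LAA to handle. A rough C sketch reconstructed from the test IR (not taken from the patch itself) is:

/* Sketch of @monotonic_ptr_simple: the store pointer (%dst.addr.09 in the IR)
   advances only when src[i] < c, so it is monotonic rather than a plain IV. */
void compress_ptr(int *dst, const int *src, int c, int n) {
  int *out = dst;
  for (int i = 0; i < n; ++i)
    if (src[i] < c)
      *out++ = src[i];
}

/* Sketch of @monotonic_ptr_indexed: the same pattern expressed with a
   monotonic integer index (%idx.012 in the IR) instead of a pointer. */
void compress_idx(int *dst, const int *src, int c, int n) {
  int idx = 0;
  for (int i = 0; i < n; ++i)
    if (src[i] < c)
      dst[idx++] = src[i];
}

In both cases the conditionally-updated pointer/index has no plain AddRec SCEV; with replacePtrSCEV the monotonic descriptor presumably supplies the {%dst,+,4} expression seen in the run-time checks above, so LAA can still bound the accesses.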

skachkov-sc (Contributor, Author) commented:

Gentle ping

skachkov-sc requested review from artagnon and npanchen on May 27, 2025.
skachkov-sc force-pushed the users/skachkov-sc/monotonic-descriptor branch from 9c543ba to 88a5869 on November 6, 2025.
skachkov-sc force-pushed the users/skachkov-sc/monotonic-access-laa branch from e9bea41 to 18aa00f on November 7, 2025.
skachkov-sc force-pushed the users/skachkov-sc/monotonic-descriptor branch 2 times, most recently from 1bfefd3 to d828715, on November 12, 2025.