[LVI] Look through pointer operand when evaluating ptrtoints #162627
Conversation
Add support for constraints over `ptrtoint` casts. In particular, assumed predicate invariants over the integral value of a pointer are constraints on the integer result of the cast itself. Fixes: llvm#158324.
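For illustration, this is the pattern the change lets CorrelatedValuePropagation fold (taken from the umin test added below; 176093659136 is 41 << 32, so the assume bounds bits 32..39 of the address below 41):

  %pi = ptrtoint ptr %p to i64
  %pi.shr = lshr i64 %pi, 32
  %pi.hi = trunc nuw i64 %pi.shr to i8
  %umin = call i8 @llvm.umin.i8(i8 %pi.hi, i8 41)
  %cmp = icmp ult ptr %p, inttoptr (i64 176093659136 to ptr)
  call void @llvm.assume(i1 %cmp)
  ret i8 %umin

With the look-through, LVI derives from the assume on %p that %pi.hi lies in [0, 41), so the umin is folded away and %pi.hi is returned directly (the trunc also gains nsw, as the checks below show).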
@llvm/pr-subscribers-llvm-transforms

Author: Antonio Frighetto (antoniofrighetto)

Changes: Add support for constraints over `ptrtoint` casts. In particular, assumed predicate invariants over the integral value of a pointer are constraints on the integer result of the cast itself. Fixes: #158324.

Full diff: https://github.com/llvm/llvm-project/pull/162627.diff

2 Files Affected:
- llvm/lib/Analysis/LazyValueInfo.cpp
- llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 0e5bc481383a0..bc6b704bbc943 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -966,6 +966,7 @@ LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
// recurse on our operand. This can cut a long search short if we know we're
// not going to be able to get any useful information anways.
switch (CI->getOpcode()) {
+ case Instruction::PtrToInt:
case Instruction::Trunc:
case Instruction::SExt:
case Instruction::ZExt:
@@ -977,6 +978,11 @@ LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
return ValueLatticeElement::getOverdefined();
}
+ // Assumed predicates over the integral value of the pointer are constraints on
+ // the integer result of the cast itself.
+ if (auto *PTI = dyn_cast<PtrToIntInst>(CI))
+ return getBlockValue(PTI->getPointerOperand(), BB, PTI);
+
// Figure out the range of the LHS. If that fails, we still apply the
// transfer rule on the full set since we may be able to locally infer
// interesting facts.
@@ -1350,6 +1356,20 @@ std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
}
Type *Ty = Val->getType();
+
+ // On the off chance, we may be able to compute a range over the address of a pointer.
+ ConstantInt *CI = nullptr;
+ if (Ty->isPointerTy() && LHS == Val &&
+ match(RHS, m_IntToPtr(m_ConstantInt(CI)))) {
+ if (Ty->getPointerAddressSpace() ==
+ RHS->getType()->getPointerAddressSpace()) {
+ ConstantRange RHSRange(CI->getValue());
+ ConstantRange AllowedR =
+ ConstantRange::makeAllowedICmpRegion(EdgePred, RHSRange);
+ return ValueLatticeElement::getRange(AllowedR);
+ }
+ }
+
if (!Ty->isIntegerTy())
return ValueLatticeElement::getOverdefined();
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
index c9ee233b5a461..68fc73743f607 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
@@ -355,3 +355,58 @@ define i8 @test_umax_nneg(i8 %a, i8 %b) {
%ret = call i8 @llvm.umax.i8(i8 %nneg_a, i8 %nneg_b)
ret i8 %ret
}
+
+define i8 @test_umin_ptr_address(ptr %p) {
+; CHECK-LABEL: @test_umin_ptr_address(
+; CHECK-NEXT: [[PI:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT: [[PI_SHR:%.*]] = lshr i64 [[PI]], 32
+; CHECK-NEXT: [[PI_HI:%.*]] = trunc nuw nsw i64 [[PI_SHR]] to i8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[P]], inttoptr (i64 176093659136 to ptr)
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i8 [[PI_HI]]
+;
+ %pi = ptrtoint ptr %p to i64
+ %pi.shr = lshr i64 %pi, 32
+ %pi.hi = trunc nuw i64 %pi.shr to i8
+ %umin = call i8 @llvm.umin.i8(i8 %pi.hi, i8 41)
+ %cmp = icmp ult ptr %p, inttoptr (i64 176093659136 to ptr)
+ call void @llvm.assume(i1 %cmp)
+ ret i8 %umin
+}
+
+define i8 @test_umax_ptr_address(ptr %p) {
+; CHECK-LABEL: @test_umax_ptr_address(
+; CHECK-NEXT: [[PI:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT: [[PI_SHR:%.*]] = lshr i64 [[PI]], 32
+; CHECK-NEXT: [[PI_HI:%.*]] = trunc nuw i64 [[PI_SHR]] to i8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[P]], inttoptr (i64 180388626431 to ptr)
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i8 [[PI_HI]]
+;
+ %pi = ptrtoint ptr %p to i64
+ %pi.shr = lshr i64 %pi, 32
+ %pi.hi = trunc nuw i64 %pi.shr to i8
+ %umin = call i8 @llvm.umax.i8(i8 %pi.hi, i8 41)
+ %cmp = icmp ugt ptr %p, inttoptr (i64 180388626431 to ptr)
+ call void @llvm.assume(i1 %cmp)
+ ret i8 %umin
+}
+
+define i8 @test_umin_ptr_address_negative(ptr %p) {
+; CHECK-LABEL: @test_umin_ptr_address_negative(
+; CHECK-NEXT: [[PI:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT: [[PI_SHR:%.*]] = lshr i64 [[PI]], 32
+; CHECK-NEXT: [[PI_HI:%.*]] = trunc nuw i64 [[PI_SHR]] to i8
+; CHECK-NEXT: [[UMIN:%.*]] = call i8 @llvm.umin.i8(i8 [[PI_HI]], i8 41)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[P]], inttoptr (i64 176093659136 to ptr)
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i8 [[UMIN]]
+;
+ %pi = ptrtoint ptr %p to i64
+ %pi.shr = lshr i64 %pi, 32
+ %pi.hi = trunc nuw i64 %pi.shr to i8
+ %umin = call i8 @llvm.umin.i8(i8 %pi.hi, i8 41)
+ %cmp = icmp ne ptr %p, inttoptr (i64 176093659136 to ptr)
+ call void @llvm.assume(i1 %cmp)
+ ret i8 %umin
+}
@llvm/pr-subscribers-llvm-analysis

Author: Antonio Frighetto (antoniofrighetto)

Changes: Add support for constraints over `ptrtoint` casts. In particular, assumed predicate invariants over the integral value of a pointer are constraints on the integer result of the cast itself. Fixes: #158324.

Full diff: https://github.com/llvm/llvm-project/pull/162627.diff (same diff as above)
Reduced from CI failure:

; ./bin/opt -S -p correlated-propagation test.ll
define i32 @src(ptr %p, i1 %cond) {
entry:
%q = load ptr, ptr %p, align 8
%cmp = icmp ne ptr %q, inttoptr (i64 -4096 to ptr)
call void @llvm.assume(i1 %cmp)
%cmp.1 = icmp ne ptr %q, inttoptr (i64 -8192 to ptr)
call void @llvm.assume(i1 %cmp.1)
br i1 %cond, label %bb.switch, label %bb.default
bb.switch:
%p.val = ptrtoint ptr %q to i64
switch i64 %p.val, label %bb.default [
i64 -4096, label %bb.1
i64 -8192, label %bb.1
]
bb.1:
ret i32 0
bb.default:
ret i32 1
}
Add support for constraints over `ptrtoint` casts. In particular, assumed predicate invariants over the integral value of a pointer are constraints on the integer result of the cast itself.

Fixes: #158324.
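For the companion umax case added in the tests above, the same reasoning applies in the other direction; a minimal sketch follows (the %umax name is mine for clarity). 180388626431 is (42 << 32) - 1, so assuming the address is above it bounds bits 32..39 at 42 or more, and the umax with 41 folds away:

  %pi = ptrtoint ptr %p to i64
  %pi.shr = lshr i64 %pi, 32
  %pi.hi = trunc nuw i64 %pi.shr to i8
  %umax = call i8 @llvm.umax.i8(i8 %pi.hi, i8 41)
  %cmp = icmp ugt ptr %p, inttoptr (i64 180388626431 to ptr)
  call void @llvm.assume(i1 %cmp)
  ret i8 %umax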