diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index f727eb0a63e05..1ba548b6ff062 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2164,10 +2164,14 @@ Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
 
   // If this is a single inbounds GEP and the original sub was nuw,
   // then the final multiplication is also nuw.
-  if (auto *I = dyn_cast<Instruction>(Result))
+  if (auto *I = dyn_cast<OverflowingBinaryOperator>(Result))
     if (IsNUW && match(Offset2, m_Zero()) && Base.LHSNW.isInBounds() &&
-        I->getOpcode() == Instruction::Mul)
-      I->setHasNoUnsignedWrap();
+        (I->use_empty() || I->hasOneUse()) && I->hasNoSignedWrap() &&
+        !I->hasNoUnsignedWrap() &&
+        ((I->getOpcode() == Instruction::Mul &&
+          match(I->getOperand(1), m_NonNegative())) ||
+         I->getOpcode() == Instruction::Shl))
+      cast<Instruction>(I)->setHasNoUnsignedWrap();
 
   // If we have a 2nd GEP of the same base pointer, subtract the offsets.
   // If both GEPs are inbounds, then the subtract does not have signed overflow.
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 993a06ad1780f..11af6b4a0197f 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -1089,3 +1089,72 @@ define <2 x i64> @splat_geps_multiple(ptr %base, i64 %idx0, <2 x i64> %idx1, <2 x i64> %idx2) {
   %d = sub <2 x i64> %gep2.int, %gep1.int
   ret <2 x i64> %d
 }
+
+define i64 @nuw_ptrdiff_shl_nsw(ptr %base, i64 %idx) {
+; CHECK-LABEL: @nuw_ptrdiff_shl_nsw(
+; CHECK-NEXT:    [[OFFSET:%.*]] = shl nuw nsw i64 [[IDX:%.*]], 3
+; CHECK-NEXT:    ret i64 [[OFFSET]]
+;
+  %offset = shl nsw i64 %idx, 3
+  %gep = getelementptr inbounds i8, ptr %base, i64 %offset
+  %lhs = ptrtoint ptr %gep to i64
+  %rhs = ptrtoint ptr %base to i64
+  %diff = sub nuw i64 %lhs, %rhs
+  ret i64 %diff
+}
+
+define i64 @nuw_ptrdiff_shl_nonsw(ptr %base, i64 %idx) {
+; CHECK-LABEL: @nuw_ptrdiff_shl_nonsw(
+; CHECK-NEXT:    [[OFFSET:%.*]] = shl i64 [[IDX:%.*]], 3
+; CHECK-NEXT:    ret i64 [[OFFSET]]
+;
+  %offset = shl i64 %idx, 3
+  %gep = getelementptr inbounds i8, ptr %base, i64 %offset
+  %lhs = ptrtoint ptr %gep to i64
+  %rhs = ptrtoint ptr %base to i64
+  %diff = sub nuw i64 %lhs, %rhs
+  ret i64 %diff
+}
+
+define i64 @nuw_ptrdiff_mul_nsw_nneg_scale(ptr %base, i64 %idx) {
+; CHECK-LABEL: @nuw_ptrdiff_mul_nsw_nneg_scale(
+; CHECK-NEXT:    [[OFFSET:%.*]] = mul nuw nsw i64 [[IDX:%.*]], 3
+; CHECK-NEXT:    ret i64 [[OFFSET]]
+;
+  %offset = mul nsw i64 %idx, 3
+  %gep = getelementptr inbounds i8, ptr %base, i64 %offset
+  %lhs = ptrtoint ptr %gep to i64
+  %rhs = ptrtoint ptr %base to i64
+  %diff = sub nuw i64 %lhs, %rhs
+  ret i64 %diff
+}
+
+define i64 @nuw_ptrdiff_mul_nsw_unknown_scale(ptr %base, i64 %idx, i64 %scale) {
+; CHECK-LABEL: @nuw_ptrdiff_mul_nsw_unknown_scale(
+; CHECK-NEXT:    [[OFFSET:%.*]] = mul nsw i64 [[IDX:%.*]], [[SCALE:%.*]]
+; CHECK-NEXT:    ret i64 [[OFFSET]]
+;
+  %offset = mul nsw i64 %idx, %scale
+  %gep = getelementptr inbounds i8, ptr %base, i64 %offset
+  %lhs = ptrtoint ptr %gep to i64
+  %rhs = ptrtoint ptr %base to i64
+  %diff = sub nuw i64 %lhs, %rhs
+  ret i64 %diff
+}
+
+declare void @usei64(i64)
+
+define i64 @nuw_ptrdiff_mul_nsw_nneg_scale_multiuse(ptr %base, i64 %idx) {
+; CHECK-LABEL: @nuw_ptrdiff_mul_nsw_nneg_scale_multiuse(
+; CHECK-NEXT:    [[OFFSET:%.*]] = mul nsw i64 [[IDX:%.*]], 3
+; CHECK-NEXT:    call void @usei64(i64 [[OFFSET]])
+; CHECK-NEXT:    ret i64 [[OFFSET]]
+;
+  %offset = mul nsw i64 %idx, 3
+  call void @usei64(i64 %offset)
+  %gep = getelementptr inbounds i8, ptr %base, i64 %offset
+  %lhs = ptrtoint ptr %gep to i64
+  %rhs = ptrtoint ptr %base to i64
+  %diff = sub nuw i64 %lhs, %rhs
+  ret i64 %diff
+}