diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index fed21db393ed2..5871973776683 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1760,6 +1760,17 @@ Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
   if (!match(And, m_And(m_Value(X), m_APInt(C2))))
     return nullptr;
 
+  // (and X, highmask) s> [0, ~highmask] --> X s> ~highmask
+  if (Cmp.getPredicate() == ICmpInst::ICMP_SGT && C1.ule(~*C2) &&
+      C2->isNegatedPowerOf2())
+    return new ICmpInst(ICmpInst::ICMP_SGT, X,
+                        ConstantInt::get(X->getType(), ~*C2));
+  // (and X, highmask) s< [1, -highmask] --> X s< -highmask
+  if (Cmp.getPredicate() == ICmpInst::ICMP_SLT && !C1.isSignMask() &&
+      (C1 - 1).ule(~*C2) && C2->isNegatedPowerOf2() && !C2->isSignMask())
+    return new ICmpInst(ICmpInst::ICMP_SLT, X,
+                        ConstantInt::get(X->getType(), -*C2));
+
   // Don't perform the following transforms if the AND has multiple uses
   if (!And->hasOneUse())
     return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/icmp-binop.ll b/llvm/test/Transforms/InstCombine/icmp-binop.ll
index 878f39bb7c9a5..356489716fff9 100644
--- a/llvm/test/Transforms/InstCombine/icmp-binop.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-binop.ll
@@ -252,3 +252,110 @@ false:
   call void @use64(i64 %v)
   ret i1 false
 }
+
+define i1 @test_icmp_sgt_and_negpow2_zero(i32 %add) {
+; CHECK-LABEL: @test_icmp_sgt_and_negpow2_zero(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD:%.*]], 7
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -8
+  %cmp = icmp sgt i32 %and, 0
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_slt_and_negpow2_one(i32 %add) {
+; CHECK-LABEL: @test_icmp_slt_and_negpow2_one(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD:%.*]], 8
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -8
+  %cmp = icmp slt i32 %and, 1
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_sgt_and_negpow2_nonzero(i32 %add) {
+; CHECK-LABEL: @test_icmp_sgt_and_negpow2_nonzero(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], -8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], -2
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -8
+  %cmp = icmp sgt i32 %and, -2
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_sgt_and_nonnegpow2_zero(i32 %add) {
+; CHECK-LABEL: @test_icmp_sgt_and_nonnegpow2_zero(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], 8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, 8
+  %cmp = icmp sgt i32 %and, 0
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_ult_and_negpow2_one(i32 %add) {
+; CHECK-LABEL: @test_icmp_ult_and_negpow2_one(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD:%.*]], 8
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -8
+  %cmp = icmp ult i32 %and, 1
+  ret i1 %cmp
+}
+
+define i1 @test_imply_dom_condition(i32 %add) {
+; CHECK-LABEL: @test_imply_dom_condition(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD:%.*]], 7
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    ret i1 false
+;
+  %and = and i32 %add, -8
+  %cmp = icmp sgt i32 %and, 0
+  tail call void @llvm.assume(i1 %cmp)
+  %min.iters.check = icmp ult i32 %and, 8
+  ret i1 %min.iters.check
+}
+
+define i1 @test_icmp_slt_and_negpow2_c(i32 %add) {
+; CHECK-LABEL: @test_icmp_slt_and_negpow2_c(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD:%.*]], 32
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -32
+  %cmp = icmp slt i32 %and, 16
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_slt_and_negpow2_invalid_c(i32 %add) {
+; CHECK-LABEL: @test_icmp_slt_and_negpow2_invalid_c(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[AND]], 48
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -32
+  %cmp = icmp slt i32 %and, 48
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_sgt_and_negpow2_c(i32 %add) {
+; CHECK-LABEL: @test_icmp_sgt_and_negpow2_c(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD:%.*]], 31
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -32
+  %cmp = icmp sgt i32 %and, 16
+  ret i1 %cmp
+}
+
+define i1 @test_icmp_sgt_and_negpow2_invalid_c(i32 %add) {
+; CHECK-LABEL: @test_icmp_sgt_and_negpow2_invalid_c(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], 48
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %and = and i32 %add, -32
+  %cmp = icmp sgt i32 %and, 48
+  ret i1 %cmp
+}
diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll
index c1b9752607c3d..b266d3e77c434 100644
--- a/llvm/test/Transforms/InstCombine/icmp.ll
+++ b/llvm/test/Transforms/InstCombine/icmp.ll
@@ -2197,8 +2197,7 @@ define i1 @icmp_ashr_and_overshift(i8 %X) {
 
 define i1 @icmp_and_ashr_neg_and_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_and_legal(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[TMP1]], 16
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 32
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
diff --git a/llvm/test/Transforms/InstCombine/pr17827.ll b/llvm/test/Transforms/InstCombine/pr17827.ll
index 2f10bb5c7f25f..58b77ec60620e 100644
--- a/llvm/test/Transforms/InstCombine/pr17827.ll
+++ b/llvm/test/Transforms/InstCombine/pr17827.ll
@@ -5,8 +5,7 @@
 define i1 @test_shift_and_cmp_not_changed1(i8 %p) {
 ; CHECK-LABEL: @test_shift_and_cmp_not_changed1(
 ; CHECK-NEXT:    [[SHLP:%.*]] = shl i8 [[P:%.*]], 5
-; CHECK-NEXT:    [[ANDP:%.*]] = and i8 [[SHLP]], -64
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[SHLP]], 64
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %shlp = shl i8 %p, 5
@@ -18,10 +17,7 @@ define i1 @test_shift_and_cmp_not_changed1(i8 %p) {
 ; With arithmetic right shift, the comparison should not be modified.
 define i1 @test_shift_and_cmp_not_changed2(i8 %p) {
 ; CHECK-LABEL: @test_shift_and_cmp_not_changed2(
-; CHECK-NEXT:    [[SHLP:%.*]] = ashr i8 [[P:%.*]], 5
-; CHECK-NEXT:    [[ANDP:%.*]] = and i8 [[SHLP]], -64
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 32
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %shlp = ashr i8 %p, 5
   %andp = and i8 %shlp, -64
@@ -34,8 +30,7 @@ define i1 @test_shift_and_cmp_not_changed2(i8 %p) {
 define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) {
 ; CHECK-LABEL: @test_shift_and_cmp_changed1(
 ; CHECK-NEXT:    [[ANDP:%.*]] = shl i8 [[P:%.*]], 5
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[ANDP]], -64
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[TMP1]], 32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 33
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %andp = and i8 %p, 6
@@ -50,8 +45,7 @@ define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) {
 define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
 ; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
 ; CHECK-NEXT:    [[ANDP:%.*]] = shl <2 x i8> [[P:%.*]], splat (i8 5)
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[ANDP]], splat (i8 -64)
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[TMP1]], splat (i8 32)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[ANDP]], splat (i8 33)
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %andp = and <2 x i8> %p, <i8 6, i8 6>
@@ -91,9 +85,7 @@ define <2 x i1> @test_shift_and_cmp_changed2_vec(<2 x i8> %p) {
 ; nsw on the shift should not affect the comparison.
 define i1 @test_shift_and_cmp_changed3(i8 %p) {
 ; CHECK-LABEL: @test_shift_and_cmp_changed3(
-; CHECK-NEXT:    [[SHLP:%.*]] = shl nsw i8 [[P:%.*]], 5
-; CHECK-NEXT:    [[ANDP:%.*]] = and i8 [[SHLP]], -64
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[P:%.*]], 2
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %shlp = shl nsw i8 %p, 5
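Note (not part of the patch): the two equivalences stated in the InstCombineCompares.cpp comments can be sanity-checked exhaustively at i8 width. The standalone C++ program below is only an illustrative sketch -- the file name, loop bounds, and variable names are invented here, and it deliberately stops short of the sign-mask mask that the patch guards with isSignMask() -- but it brute-forces both folds over every i8 value and every constant in the stated ranges.

// fold_check.cpp (hypothetical): exhaustively verify, at i8 width,
//   (and X, highmask) s> C1, C1 in [0, ~highmask]  ==>  X s> ~highmask
//   (and X, highmask) s< C1, C1 in [1, -highmask]  ==>  X s< -highmask
// for each negated power of two except the sign mask.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (int maskBits = 1; maskBits <= 6; ++maskBits) {
    int8_t mask = (int8_t)-(1 << maskBits); // high mask: -2, -4, ..., -64
    int8_t notMask = (int8_t)~mask;         // e.g. ~(-8) == 7
    for (int x = -128; x <= 127; ++x) {
      int8_t masked = (int8_t)(x & mask);
      // sgt form: any threshold in [0, ~mask] behaves like "X s> ~mask".
      for (int c1 = 0; c1 <= notMask; ++c1)
        assert((masked > c1) == (x > notMask));
      // slt form: any threshold in [1, -mask] behaves like "X s< -mask".
      for (int c1 = 1; c1 <= -mask; ++c1)
        assert((masked < c1) == (x < -mask));
    }
  }
  puts("both folds hold for all i8 values and in-range constants");
  return 0;
}

Any out-of-range constant would trip an assert if the loop bounds were widened, which mirrors the boundaries exercised by the *_invalid_c tests above: with a -32 mask, 48 lies outside both [0, 31] and [1, 32], so those compares stay unfolded.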