Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 16 additions & 6 deletions llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,8 @@ m_scev_PtrToInt(const Op0_t &Op0) {
}

/// Match a binary SCEV.
template <typename SCEVTy, typename Op0_t, typename Op1_t>
template <typename SCEVTy, typename Op0_t, typename Op1_t,
bool Commutable = false>
struct SCEVBinaryExpr_match {
Op0_t Op0;
Op1_t Op1;
Expand All @@ -192,15 +193,18 @@ struct SCEVBinaryExpr_match {

bool match(const SCEV *S) const {
auto *E = dyn_cast<SCEVTy>(S);
return E && E->getNumOperands() == 2 && Op0.match(E->getOperand(0)) &&
Op1.match(E->getOperand(1));
return E && E->getNumOperands() == 2 &&
((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) ||
(Commutable && Op0.match(E->getOperand(1)) &&
Op1.match(E->getOperand(0))));
}
};

template <typename SCEVTy, typename Op0_t, typename Op1_t>
inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t>
template <typename SCEVTy, typename Op0_t, typename Op1_t,
bool Commutable = false>
inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>
m_scev_Binary(const Op0_t &Op0, const Op1_t &Op1) {
return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t>(Op0, Op1);
return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
Expand All @@ -215,6 +219,12 @@ m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1) {
return m_scev_Binary<SCEVMulExpr>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, true>
m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, true>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SCEVBinaryExpr_match<SCEVUDivExpr, Op0_t, Op1_t>
m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
Expand Down
21 changes: 20 additions & 1 deletion llvm/lib/Analysis/ScalarEvolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10785,6 +10785,25 @@ bool ScalarEvolution::SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
if (Depth >= 3)
return false;

const SCEV *NewLHS, *NewRHS;
if (match(LHS, m_scev_c_Mul(m_SCEV(NewLHS), m_SCEVVScale())) &&
match(RHS, m_scev_c_Mul(m_SCEV(NewRHS), m_SCEVVScale()))) {
const SCEVMulExpr *LMul = cast<SCEVMulExpr>(LHS);
const SCEVMulExpr *RMul = cast<SCEVMulExpr>(RHS);

// (X * vscale) uicmp/eq/ne (Y * vscale) ==> X uicmp/eq/ne Y
// when neither multiply wraps.
// (X * vscale) sicmp (Y * vscale) ==> X sicmp Y
// when neither multiply changes sign.
if ((LMul->hasNoSignedWrap() && RMul->hasNoSignedWrap()) ||
(LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap() &&
!ICmpInst::isSigned(Pred))) {
Comment on lines 10796 to 10800
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would be good to reorder & update comment to match the condition?

Suggested change
// (X * vscale) uicmp/eq/ne (Y * vscale) ==> X uicmp/eq/ne Y
// when neither multiply wraps.
// (X * vscale) sicmp (Y * vscale) ==> X sicmp Y
// when neither multiply changes sign.
if ((LMul->hasNoSignedWrap() && RMul->hasNoSignedWrap()) ||
(LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap() &&
!ICmpInst::isSigned(Pred))) {
// (X * vscale) pred (Y * vscale) ==> X pred Y
// when both multiples are NSW.
// (X * vscale) uicmp/eq/ne (Y * vscale) ==> X uicmp/eq/ne Y
// when both multiples are NUW.
if ((LMul->hasNoSignedWrap() && RMul->hasNoSignedWrap()) ||
(LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap() &&
!ICmpInst::isSigned(Pred))) {

LHS = NewLHS;
RHS = NewRHS;
Changed = true;
}
}

// Canonicalize a constant to the right side.
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
// Check for both operands constant.
Expand Down Expand Up @@ -10959,7 +10978,7 @@ bool ScalarEvolution::SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
// Recursively simplify until we either hit a recursion limit or nothing
// changes.
if (Changed)
return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1);
(void)SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Use [[maybe_unused]]?


return Changed;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,14 @@ define void @vscale_mul_4(ptr noalias noundef readonly captures(none) %a, ptr no
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 4
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[B]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP10]], ptr [[B]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP4]], ptr [[B]], align 4
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
Expand Down Expand Up @@ -124,36 +124,29 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 12
; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP25:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD4]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP25]], ptr [[TMP12]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
Expand Down Expand Up @@ -191,17 +184,13 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 31
; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 2
Expand All @@ -226,14 +215,11 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
Expand Down Expand Up @@ -271,17 +257,13 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 64
; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 2
Expand All @@ -306,14 +288,11 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
Expand Down
137 changes: 137 additions & 0 deletions llvm/unittests/Analysis/ScalarEvolutionTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1768,4 +1768,141 @@ TEST_F(ScalarEvolutionsTest, ComplexityComparatorIsStrictWeakOrdering3) {
SE.getSCEV(Or1);
}

// Exercise ScalarEvolution::SimplifyICmpOperands' removal of a common
// `vscale` factor from both sides of a comparison: (X * vscale) pred
// (Y * vscale) simplifies to X pred Y when the multiplies' wrap flags
// justify it for the given predicate.
TEST_F(ScalarEvolutionsTest, SimplifyICmpOperands) {
  LLVMContext C;
  SMDiagnostic Err;
  // Minimal module: the function body is irrelevant; only the i32
  // arguments %a and %b are used to build SCEV expressions below.
  std::unique_ptr<Module> M =
      parseAssemblyString("define i32 @foo(ptr %loc, i32 %a, i32 %b) {"
                          "entry: "
                          "  ret i32 %a "
                          "} ",
                          Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  // Remove common factor when there's no signed wrapping.
  // With NSW on both multiplies, the factor may be stripped for signed,
  // unsigned, and equality predicates alike.
  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    const SCEV *A = SE.getSCEV(getArgByName(F, "a"));
    const SCEV *B = SE.getSCEV(getArgByName(F, "b"));
    const SCEV *VS = SE.getVScale(A->getType());
    const SCEV *VSxA = SE.getMulExpr(VS, A, SCEV::FlagNSW);
    const SCEV *VSxB = SE.getMulExpr(VS, B, SCEV::FlagNSW);

    // Signed compare: (vscale*A) slt (vscale*B) ==> A slt B.
    {
      CmpPredicate NewPred = ICmpInst::ICMP_SLT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_TRUE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
      EXPECT_EQ(NewPred, ICmpInst::ICMP_SLT);
      EXPECT_EQ(NewLHS, A);
      EXPECT_EQ(NewRHS, B);
    }

    // Unsigned compare simplifies under NSW as well.
    {
      CmpPredicate NewPred = ICmpInst::ICMP_ULT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_TRUE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
      EXPECT_EQ(NewPred, ICmpInst::ICMP_ULT);
      EXPECT_EQ(NewLHS, A);
      EXPECT_EQ(NewRHS, B);
    }

    // Equality compare simplifies under NSW as well.
    {
      CmpPredicate NewPred = ICmpInst::ICMP_EQ;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_TRUE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
      EXPECT_EQ(NewPred, ICmpInst::ICMP_EQ);
      EXPECT_EQ(NewLHS, A);
      EXPECT_EQ(NewRHS, B);
    }

    // Verify the common factor's position doesn't impede simplification.
    {
      const SCEV *C = SE.getConstant(A->getType(), 100);
      const SCEV *CxVS = SE.getMulExpr(C, VS, SCEV::FlagNSW);

      // Verify common factor is available at different indices.
      ASSERT_TRUE(isa<SCEVVScale>(cast<SCEVMulExpr>(VSxA)->getOperand(0)) !=
                  isa<SCEVVScale>(cast<SCEVMulExpr>(CxVS)->getOperand(0)));

      CmpPredicate NewPred = ICmpInst::ICMP_SLT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = CxVS;
      EXPECT_TRUE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
      EXPECT_EQ(NewPred, ICmpInst::ICMP_SLT);
      EXPECT_EQ(NewLHS, A);
      EXPECT_EQ(NewRHS, C);
    }
  });

  // Remove common factor when there's no unsigned wrapping.
  // With only NUW, stripping the factor is valid for unsigned and equality
  // predicates, but not for signed compares.
  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    const SCEV *A = SE.getSCEV(getArgByName(F, "a"));
    const SCEV *B = SE.getSCEV(getArgByName(F, "b"));
    const SCEV *VS = SE.getVScale(A->getType());
    const SCEV *VSxA = SE.getMulExpr(VS, A, SCEV::FlagNUW);
    const SCEV *VSxB = SE.getMulExpr(VS, B, SCEV::FlagNUW);

    // Signed compare must NOT simplify under NUW alone.
    {
      CmpPredicate NewPred = ICmpInst::ICMP_SLT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_FALSE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
    }

    // Unsigned compare simplifies under NUW.
    {
      CmpPredicate NewPred = ICmpInst::ICMP_ULT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_TRUE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
      EXPECT_EQ(NewPred, ICmpInst::ICMP_ULT);
      EXPECT_EQ(NewLHS, A);
      EXPECT_EQ(NewRHS, B);
    }

    // Equality compare simplifies under NUW.
    {
      CmpPredicate NewPred = ICmpInst::ICMP_EQ;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_TRUE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
      EXPECT_EQ(NewPred, ICmpInst::ICMP_EQ);
      EXPECT_EQ(NewLHS, A);
      EXPECT_EQ(NewRHS, B);
    }
  });

  // Do not remove common factor due to wrap flag mismatch.
  // One side NSW, the other NUW: neither the NSW-pair nor the NUW-pair
  // condition holds, so no predicate may simplify.
  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    const SCEV *A = SE.getSCEV(getArgByName(F, "a"));
    const SCEV *B = SE.getSCEV(getArgByName(F, "b"));
    const SCEV *VS = SE.getVScale(A->getType());
    const SCEV *VSxA = SE.getMulExpr(VS, A, SCEV::FlagNSW);
    const SCEV *VSxB = SE.getMulExpr(VS, B, SCEV::FlagNUW);

    {
      CmpPredicate NewPred = ICmpInst::ICMP_SLT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_FALSE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
    }

    {
      CmpPredicate NewPred = ICmpInst::ICMP_ULT;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_FALSE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
    }

    {
      CmpPredicate NewPred = ICmpInst::ICMP_EQ;
      const SCEV *NewLHS = VSxA;
      const SCEV *NewRHS = VSxB;
      EXPECT_FALSE(SE.SimplifyICmpOperands(NewPred, NewLHS, NewRHS));
    }
  });
}

} // end namespace llvm