Commit 462a54f

[KnownBits] Refine known bits for lerp
1 parent aedd1e7 commit 462a54f

3 files changed: +185, -13 lines


llvm/include/llvm/IR/PatternMatch.h

Lines changed: 2 additions & 0 deletions
@@ -882,8 +882,10 @@ m_Instruction(Instruction *&I, const MatchTy &Match) {
 
 /// Match a unary operator, capturing it if we match.
 inline bind_ty<UnaryOperator> m_UnOp(UnaryOperator *&I) { return I; }
+inline bind_ty<const UnaryOperator> m_UnOp(const UnaryOperator *&I) { return I; }
 /// Match a binary operator, capturing it if we match.
 inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
+inline bind_ty<const BinaryOperator> m_BinOp(const BinaryOperator *&I) { return I; }
 /// Match a with overflow intrinsic, capturing it if we match.
 inline bind_ty<WithOverflowInst> m_WithOverflowInst(WithOverflowInst *&I) {
   return I;
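These const-qualified overloads let matching code that only holds const pointers bind the matched operator directly, which is what the new ValueTracking helper below relies on. A minimal usage sketch, assuming the usual LLVM headers and an illustrative helper name that is not part of the commit:

  #include "llvm/IR/PatternMatch.h"

  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Bind a `const BinaryOperator *` while matching `sub nuw %b, %c`; before
  // this change, m_BinOp could only bind through a non-const pointer.
  static const BinaryOperator *matchNUWSubSketch(const Value *V) {
    const Value *B = nullptr, *C = nullptr;
    const BinaryOperator *Sub = nullptr;
    if (match(V, m_CombineAnd(m_BinOp(Sub), m_NUWSub(m_Value(B), m_Value(C)))))
      return Sub;
    return nullptr;
  }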

llvm/lib/Analysis/ValueTracking.cpp

Lines changed: 151 additions & 0 deletions
@@ -350,6 +350,153 @@ unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
   return V->getType()->getScalarSizeInBits() - SignBits + 1;
 }
 
+/// Try to detect the lerp pattern: a * (b - c) + c * d
+/// where a >= 0, b >= 0, c >= 0, d >= 0, and b >= c.
+///
+/// In that particular case, we can use the following chain of reasoning:
+///
+/// a * (b - c) + c * d <= a' * (b - c) + a' * c = a' * b where a' = max(a, d)
+///
+/// Since that is true for arbitrary a, b, c and d within our constraints, we
+/// can conclude that:
+///
+/// max(a * (b - c) + c * d) <= max(max(a), max(d)) * max(b) = U
+///
+/// Since any result of the lerp is less than or equal to U, it has at least
+/// as many leading zeros as U.
+///
+/// While quite specific, this situation is fairly common in computer graphics
+/// in the form of alpha blending.
+///
+/// Leaves \p KnownOut unchanged if the pattern doesn't match or the
+/// constraints don't hold for the given operands.
+static void computeKnownBitsFromLerpPattern(const Value *Op0, const Value *Op1,
+                                            const APInt &DemandedElts,
+                                            KnownBits &KnownOut,
+                                            const SimplifyQuery &Q,
+                                            unsigned Depth) {
+
+  Type *Ty = Op0->getType();
+  const unsigned BitWidth = Ty->getScalarSizeInBits();
+
+  // Only handle scalar types for now.
+  if (Ty->isVectorTy())
+    return;
+
+  // Try to match: a * (b - c) + c * d.
+  // When a == 1, A stays nullptr; the same applies to d/D.
+  const Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
+  const BinaryOperator *SubBC = nullptr;
+
+  const auto MatchSubBC = [&]() {
+    // (b - c) can have two forms that interest us:
+    //
+    // 1. sub nuw %b, %c
+    // 2. xor %c, %b
+    //
+    // For the first case, the nuw flag guarantees our requirement b >= c.
+    //
+    // The second case happens when the analysis can infer that b is a mask
+    // for c and the sub has been transformed into an xor (which is usually
+    // the case for constant b's). Even though xor is commutative,
+    // canonicalization ensures that the constant ends up on the RHS. The xor
+    // of two non-negative integers is guaranteed to be non-negative as well.
+    return m_CombineAnd(m_BinOp(SubBC),
+                        m_CombineOr(m_NUWSub(m_Value(B), m_Value(C)),
+                                    m_Xor(m_Value(C), m_Value(B))));
+  };
+
+  const auto MatchASubBC = [&]() {
+    // Cases:
+    // - a * (b - c)
+    // - (b - c) * a
+    // - (b - c) <- a implicitly equals 1
+    return m_CombineOr(m_c_Mul(m_Value(A), MatchSubBC()), MatchSubBC());
+  };
+
+  const auto MatchCD = [&]() {
+    // Cases:
+    // - d * c
+    // - c * d
+    // - c <- d implicitly equals 1
+    return m_CombineOr(m_c_Mul(m_Value(D), m_Specific(C)), m_Specific(C));
+  };
+
+  const auto Match = [&](const Value *LHS, const Value *RHS) {
+    // MatchCD uses m_Specific(C), so C has to be bound before it is used;
+    // match(LHS, MatchASubBC()) therefore has to be evaluated first and
+    // return true, which the short-circuiting && guarantees.
+    //
+    // If Match returns true, B and C are guaranteed to be non-null.
+    return match(LHS, MatchASubBC()) && match(RHS, MatchCD());
+  };
+
+  if (!Match(Op0, Op1) && !Match(Op1, Op0))
+    return;
+
+  const auto ComputeKnownBitsOrOne = [&](const Value *V) {
+    // For some of the values we use the convention of leaving them nullptr
+    // to signify an implicit constant 1.
+    return V ? computeKnownBits(V, DemandedElts, Q, Depth + 1)
+             : KnownBits::makeConstant(APInt(BitWidth, 1));
+  };
+
+  // Check that all operands are non-negative.
+  const KnownBits KnownA = ComputeKnownBitsOrOne(A);
+  if (!KnownA.isNonNegative())
+    return;
+
+  const KnownBits KnownD = ComputeKnownBitsOrOne(D);
+  if (!KnownD.isNonNegative())
+    return;
+
+  const KnownBits KnownB = computeKnownBits(B, DemandedElts, Q, Depth + 1);
+  if (!KnownB.isNonNegative())
+    return;
+
+  const KnownBits KnownC = computeKnownBits(C, DemandedElts, Q, Depth + 1);
+  if (!KnownC.isNonNegative())
+    return;
+
+  if (SubBC->getOpcode() == Instruction::Xor) {
+    // If we matched the subtraction as an xor, we need to check that the xor
+    // is actually semantically equivalent to a subtraction.
+    //
+    // For that to be true, b has to be a mask for c.
+    // In known-bits terms that means the following:
+    //
+    // - b is a constant
+    if (!KnownB.isConstant())
+      return;
+
+    // - b has ones at least in every position where c might have ones.
+    const APInt MaxC = KnownC.getMaxValue();
+    if ((KnownB.getConstant() & MaxC) != MaxC)
+      return;
+  }
+
+  // Compute max(a, d).
+  const APInt MaxA = KnownA.getMaxValue();
+  const APInt MaxD = KnownD.getMaxValue();
+  const APInt MaxAD = APIntOps::umax(MaxA, MaxD);
+
+  // Compute max(a, d) * max(b).
+  const APInt MaxB = KnownB.getMaxValue();
+  bool Overflow;
+  const APInt UpperBound = MaxAD.umul_ov(MaxB, Overflow);
+
+  if (Overflow)
+    return;
+
+  // Count leading zeros in the upper bound.
+  const unsigned MinimumNumberOfLeadingZeros = UpperBound.countl_zero();
+
+  // Set the known leading zero bits in KnownOut.
+  KnownOut.Zero.setHighBits(MinimumNumberOfLeadingZeros);
+
+  return;
+}
+
 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                    bool NSW, bool NUW,
                                    const APInt &DemandedElts,
@@ -369,6 +516,10 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
       isImpliedByDomCondition(ICmpInst::ICMP_SLE, Op1, Op0, Q.CxtI, Q.DL)
           .value_or(false))
     KnownOut.makeNonNegative();
+
+  if (Add)
+    // Try to match lerp pattern and combine results
+    computeKnownBitsFromLerpPattern(Op0, Op1, DemandedElts, KnownOut, Q, Depth);
 }
 
 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
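As a worked instance of the bound above (an editor's check, not part of the commit): in the 8-bit alpha-blending case exercised by the tests below, a, c, d and the constant b are each at most 255 after zero-extension to i32, so U = max(max(a), max(d)) * max(b) = 255 * 255 = 65025 < 2^16, and the i32 sum is therefore known to have at least 16 leading zero bits. That is what lets the umin(%add, 65535) clamp in @test_clamp fold away.

  // Editor's sketch of the arithmetic only; mirrors i8 operands zext'ed to i32.
  static_assert(255u * 255u == 65025u,
                "U = max(a, d) * max(b) when a, b, c, d <= 255");
  static_assert(65025u < (1u << 16),
                "so KnownBits reports at least 16 leading zeros for the i32 add");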

llvm/test/Transforms/InstCombine/known-bits-lerp-pattern.ll

Lines changed: 32 additions & 13 deletions
@@ -15,8 +15,7 @@ define i32 @test_clamp(i8 %a, i8 %c, i8 %d) {
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call i32 @llvm.umin.i32(i32 [[ADD]], i32 65535)
-; CHECK-NEXT:    ret i32 [[RESULT]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %a32 = zext i8 %a to i32
   %c32 = zext i8 %c to i32
@@ -40,8 +39,7 @@ define i1 @test_trunc_cmp(i8 %a, i8 %c, i8 %d) {
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %a32 = zext i8 %a to i32
@@ -66,8 +64,7 @@ define i1 @test_trunc_cmp_xor(i8 %a, i8 %c, i8 %d) {
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %a32 = zext i8 %a to i32
@@ -93,8 +90,7 @@ define i1 @test_trunc_cmp_arbitrary_b(i8 %a, i8 %b, i8 %c, i8 %d) {
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %a32 = zext i8 %a to i32
@@ -120,8 +116,7 @@ define i1 @test_trunc_cmp_no_a(i8 %b, i8 %c, i8 %d) {
 ; CHECK-NEXT:    [[MUL1:%.*]] = sub nuw nsw i32 [[B32]], [[C32]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %b32 = zext i8 %b to i32
@@ -144,8 +139,7 @@ define i1 @test_trunc_cmp_no_d(i8 %a, i8 %b, i8 %c) {
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 [[B32]], [[C32]]
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[C32]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %a32 = zext i8 %a to i32
@@ -159,4 +153,29 @@ define i1 @test_trunc_cmp_no_d(i8 %a, i8 %b, i8 %c) {
   ret i1 %cmp
 }
 
-declare void @llvm.assume(i1)
+define i1 @test_trunc_cmp_xor_negative(i8 %a, i8 %c, i8 %d) {
+; CHECK-LABEL: define i1 @test_trunc_cmp_xor_negative(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) {
+; CHECK-NEXT:    [[A32:%.*]] = zext i8 [[A]] to i32
+; CHECK-NEXT:    [[C32:%.*]] = zext i8 [[C]] to i32
+; CHECK-NEXT:    [[D32:%.*]] = zext i8 [[D]] to i32
+; CHECK-NEXT:    [[SUB:%.*]] = xor i32 [[C32]], 234
+; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]]
+; CHECK-NEXT:    [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]]
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %a32 = zext i8 %a to i32
+  %c32 = zext i8 %c to i32
+  %d32 = zext i8 %d to i32
+  %sub = xor i32 234, %c32
+  %mul1 = mul i32 %a32, %sub
+  %mul2 = mul i32 %c32, %d32
+  %add = add i32 %mul1, %mul2
+  ; We should keep the trunc in this case
+  %trunc = trunc i32 %add to i16
+  %cmp = icmp eq i16 %trunc, 1234
+  ret i1 %cmp
+}
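The negative test above hinges on the mask check in computeKnownBitsFromLerpPattern: the xor only behaves like a nuw sub when the constant b has ones in every bit position c might have. A small editor's sketch of that check for a zero-extended i8 c (MaxC == 255), with illustrative names that are not part of the commit:

  #include "llvm/ADT/APInt.h"
  #include <cstdint>

  using namespace llvm;

  // Mirrors the (KnownB.getConstant() & MaxC) != MaxC test above.
  static bool coversZExtI8(uint64_t B) {
    const APInt MaxC(32, 255);
    return (APInt(32, B) & MaxC) == MaxC;
  }

  // coversZExtI8(234) is false: 234 = 0xEA is missing bits 0, 2 and 4 of an
  // i8, so the pattern is rejected and @test_trunc_cmp_xor_negative keeps
  // its trunc.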
