Commit 0561b33
Author: Alexei Starovoitov
2 parents: d47049f + 5b4c54a

Merge branch 'bpf-next/master' into for-next

Signed-off-by: Alexei Starovoitov <[email protected]>

3 files changed: +191, -20 lines changed

kernel/bpf/verifier.c

Lines changed: 58 additions & 19 deletions
@@ -2523,6 +2523,58 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
         if ((u64)reg->smin_value <= (u64)reg->smax_value) {
                 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
                 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
+        } else {
+                /* If the s64 range crosses the sign boundary, then it's split
+                 * between the beginning and end of the U64 domain. In that
+                 * case, we can derive new bounds if the u64 range overlaps
+                 * with only one end of the s64 range.
+                 *
+                 * In the following example, the u64 range overlaps only with
+                 * positive portion of the s64 range.
+                 *
+                 * 0                                                       U64_MAX
+                 * |   [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx]               |
+                 * |----------------------------|----------------------------|
+                 * |xxxxx s64 range xxxxxxxxx]                       [xxxxxxx|
+                 * 0                     S64_MAX   S64_MIN                  -1
+                 *
+                 * We can thus derive the following new s64 and u64 ranges.
+                 *
+                 * 0                                                       U64_MAX
+                 * |  [xxxxxx u64 range xxxxx]                                 |
+                 * |----------------------------|----------------------------|
+                 * |  [xxxxxx s64 range xxxxx]                                 |
+                 * 0                     S64_MAX   S64_MIN                  -1
+                 *
+                 * If they overlap in two places, we can't derive anything
+                 * because reg_state can't represent two ranges per numeric
+                 * domain.
+                 *
+                 * 0                                                       U64_MAX
+                 * | [xxxxxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxxxxx]           |
+                 * |----------------------------|----------------------------|
+                 * |xxxxx s64 range xxxxxxxxx]              [xxxxxxxxxx|
+                 * 0                     S64_MAX   S64_MIN                  -1
+                 *
+                 * The first condition below corresponds to the first diagram
+                 * above.
+                 */
+                if (reg->umax_value < (u64)reg->smin_value) {
+                        reg->smin_value = (s64)reg->umin_value;
+                        reg->umax_value = min_t(u64, reg->umax_value, reg->smax_value);
+                } else if ((u64)reg->smax_value < reg->umin_value) {
+                        /* This second condition considers the case where the u64 range
+                         * overlaps with the negative portion of the s64 range:
+                         *
+                         * 0                                                       U64_MAX
+                         * |          [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx]        |
+                         * |----------------------------|----------------------------|
+                         * |xxxxxxxxx]                       [xxxxxxxxxxxx s64 range |
+                         * 0                     S64_MAX   S64_MIN                  -1
+                         */
+                        reg->smax_value = (s64)reg->umax_value;
+                        reg->umin_value = max_t(u64, reg->umin_value, reg->smin_value);
+                }
         }
 }
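For illustration, the deduction added above can be exercised outside the verifier. The following is a minimal, self-contained sketch (bounds64 and deduce_cross_sign are illustrative stand-ins for this patch's logic, not kernel code) that applies the same two conditions to the bounds used by the "negative overlap" selftest added further below:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the 64-bit bounds of bpf_reg_state. */
struct bounds64 {
        int64_t  smin, smax;
        uint64_t umin, umax;
};

/* Mirrors the hunk above: when the s64 range wraps the sign boundary and
 * the u64 range overlaps only one of its two halves, tighten both domains.
 */
static void deduce_cross_sign(struct bounds64 *r)
{
        if ((uint64_t)r->smin <= (uint64_t)r->smax)
                return; /* s64 range does not wrap; not the case handled here */

        if (r->umax < (uint64_t)r->smin) {
                /* u64 range overlaps only the positive half of the s64 range */
                r->smin = (int64_t)r->umin;
                if ((uint64_t)r->smax < r->umax)
                        r->umax = (uint64_t)r->smax;
        } else if ((uint64_t)r->smax < r->umin) {
                /* u64 range overlaps only the negative half of the s64 range */
                r->smax = (int64_t)r->umax;
                if ((uint64_t)r->smin > r->umin)
                        r->umin = (uint64_t)r->smin;
        }
}

int main(void)
{
        /* Bounds at instruction 7 of the "negative overlap" selftest below. */
        struct bounds64 r = {
                .smin = -655,
                .smax = 0xeffffeee,
                .umin = 0xfffffcf1,
                .umax = 0xffffffffffffff6eULL,
        };

        deduce_cross_sign(&r);
        /* Prints smin=-655 smax=-146 umin=0xfffffffffffffd71 umax=0xffffffffffffff6e */
        printf("smin=%lld smax=%lld umin=%#llx umax=%#llx\n",
               (long long)r.smin, (long long)r.smax,
               (unsigned long long)r.umin, (unsigned long long)r.umax);
        return 0;
}

With these inputs the sketch reports s64=[-655,-146] and u64=[0xfffffffffffffd71, 0xffffffffffffff6e], the same bounds the new selftest expects the verifier to derive.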

@@ -2554,20 +2606,6 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
         reg->smin_value = max_t(s64, reg->smin_value, new_smin);
         reg->smax_value = min_t(s64, reg->smax_value, new_smax);
 
-        /* if s32 can be treated as valid u32 range, we can use it as well */
-        if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
-                /* s32 -> u64 tightening */
-                new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
-                new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
-                reg->umin_value = max_t(u64, reg->umin_value, new_umin);
-                reg->umax_value = min_t(u64, reg->umax_value, new_umax);
-                /* s32 -> s64 tightening */
-                new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
-                new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
-                reg->smin_value = max_t(s64, reg->smin_value, new_smin);
-                reg->smax_value = min_t(s64, reg->smax_value, new_smax);
-        }
-
         /* Here we would like to handle a special case after sign extending load,
          * when upper bits for a 64-bit range are all 1s or all 0s.
          *
@@ -2634,6 +2672,7 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
         /* We might have learned something about the sign bit. */
         __reg_deduce_bounds(reg);
         __reg_deduce_bounds(reg);
+        __reg_deduce_bounds(reg);
         /* We might have learned some bits from the bounds. */
         __reg_bound_offset(reg);
         /* Intersecting with the old var_off might have improved our bounds
@@ -4518,7 +4557,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
  *   . if (scalar cond K|scalar)
  *   . helper_call(.., scalar, ...) where ARG_CONST is expected
  *   backtrack through the verifier states and mark all registers and
- *   stack slots with spilled constants that these scalar regisers
+ *   stack slots with spilled constants that these scalar registers
  *   should be precise.
  *   . during state pruning two registers (or spilled stack slots)
  *     are equivalent if both are not precise.
@@ -18450,7 +18489,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
 /* the parentage chains form a tree.
  * the verifier states are added to state lists at given insn and
  * pushed into state stack for future exploration.
- * when the verifier reaches bpf_exit insn some of the verifer states
+ * when the verifier reaches bpf_exit insn some of the verifier states
  * stored in the state lists have their final liveness state already,
  * but a lot of states will get revised from liveness point of view when
  * the verifier explores other branches.
@@ -19166,7 +19205,7 @@ static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
  * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
  * states to match, which otherwise would look like an infinite loop. So while
  * iter_next() calls are taken care of, we still need to be careful and
- * prevent erroneous and too eager declaration of "ininite loop", when
+ * prevent erroneous and too eager declaration of "infinite loop", when
  * iterators are involved.
  *
  * Here's a situation in pseudo-BPF assembly form:
@@ -19208,7 +19247,7 @@ static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
  *
  * This approach allows to keep infinite loop heuristic even in the face of
  * active iterator. E.g., C snippet below is and will be detected as
- * inifintely looping:
+ * infinitely looping:
  *
  *   struct bpf_iter_num it;
  *   int *p, x;
@@ -24449,7 +24488,7 @@ static int compute_scc(struct bpf_verifier_env *env)
          *       if pre[i] == 0:
          *         recur(i)
          *
-         * Below implementation replaces explicit recusion with array 'dfs'.
+         * Below implementation replaces explicit recursion with array 'dfs'.
          */
         for (i = 0; i < insn_cnt; i++) {
                 if (pre[i])

tools/testing/selftests/bpf/prog_tests/reg_bounds.c

Lines changed: 14 additions & 0 deletions
@@ -465,6 +465,20 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
                 return range_improve(x_t, x, x_swap);
         }
 
+        if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
+                if (x_t == S64 && x.a > x.b) {
+                        if (x.b < y.a && x.a <= y.b)
+                                return range(x_t, x.a, y.b);
+                        if (x.a > y.b && x.b >= y.a)
+                                return range(x_t, y.a, x.b);
+                } else if (x_t == U64 && y.a > y.b) {
+                        if (y.b < x.a && y.a <= x.b)
+                                return range(x_t, y.a, x.b);
+                        if (y.a > x.b && y.b >= x.a)
+                                return range(x_t, x.a, y.b);
+                }
+        }
+
         /* otherwise, plain range cast and intersection works */
         return range_improve(x_t, x, y_cast);
 }
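The hunk above mirrors the kernel-side deduction in the selftest's own range model, where, judging from the added condition, a signed 64-bit range that crosses the sign boundary shows up as a pair whose first value is larger than its second when compared as unsigned. As a rough sketch (the range64 pair below is an assumed, simplified stand-in for the selftest's struct range, for illustration only), the added S64 branch behaves like this on the "negative overlap" values:

#include <stdio.h>
#include <stdint.h>

/* Simplified model of a range pair: for U64 it holds [a, b] directly; for
 * S64 it holds the signed bounds as raw u64 bit patterns, so a > b
 * (unsigned) means the range wraps the sign boundary.
 */
struct range64 { uint64_t a, b; };

/* Mirrors the x_t == S64 branch added above: refine a wrapping S64 range x
 * against a U64 range y when they overlap on exactly one side.
 */
static struct range64 refine_s64_with_u64(struct range64 x, struct range64 y)
{
        if (x.a > x.b) {                        /* S64 range crosses the sign boundary */
                if (x.b < y.a && x.a <= y.b)    /* overlap on the negative side only */
                        return (struct range64){ x.a, y.b };
                if (x.a > y.b && x.b >= y.a)    /* overlap on the positive side only */
                        return (struct range64){ y.a, x.b };
        }
        return x;                               /* no refinement possible */
}

int main(void)
{
        /* Same values as the "negative overlap" selftest: s64=[-655, 0xeffffeee]
         * stored as raw u64 bit patterns, u64=[0xfffffcf1, 0xffffffffffffff6e].
         */
        struct range64 s64r = { (uint64_t)-655LL, 0xeffffeee };
        struct range64 u64r = { 0xfffffcf1, 0xffffffffffffff6eULL };
        struct range64 out = refine_s64_with_u64(s64r, u64r);

        /* Prints [-655, -146], matching the kernel-side deduction above. */
        printf("[%lld, %lld]\n", (long long)out.a, (long long)out.b);
        return 0;
}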

tools/testing/selftests/bpf/progs/verifier_bounds.c

Lines changed: 119 additions & 1 deletion
@@ -1066,7 +1066,7 @@ l0_%=: r0 = 0; \
 SEC("xdp")
 __description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
 __success __retval(0)
-__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
+__flag(BPF_F_TEST_REG_INVARIANTS)
 __naked void crossing_64_bit_signed_boundary_2(void)
 {
         asm volatile ("                                 \
@@ -1550,4 +1550,122 @@ l0_%=: r0 = 0; \
         : __clobber_all);
 }
 
+/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
+ * overlap on the negative side. At instruction 7, the ranges look as follows:
+ *
+ * 0             umin=0xfffffcf1                       umax=0xff..ff6e  U64_MAX
+ * |             [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]                       |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxxxx]                                     [xxxxxxxxxxxx|
+ * 0      smax=0xeffffeee                  smin=-655            -1
+ *
+ * We should therefore deduce the following new bounds:
+ *
+ * 0                               u64=[0xff..ffd71;0xff..ff6e]  U64_MAX
+ * |                                                       [xxx]    |
+ * |----------------------------|------------------------------|
+ * |                                                       [xxx]    |
+ * 0                                        s64=[-655;-146]        -1
+ *
+ * Without the deduction cross sign boundary, we end up with an invariant
+ * violation error.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, negative overlap")
+__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))")
+__retval(0)
+__naked void bounds_deduct_negative_overlap(void)
+{
+        asm volatile("                                  \
+        call %[bpf_get_prandom_u32];                    \
+        w3 = w0;                                        \
+        w6 = (s8)w0;                                    \
+        r0 = (s8)r0;                                    \
+        if w6 >= 0xf0000000 goto l0_%=;                 \
+        r0 += r6;                                       \
+        r6 += 400;                                      \
+        r0 -= r6;                                       \
+        if r3 < r0 goto l0_%=;                          \
+l0_%=:  r0 = 0;                                         \
+        exit;                                           \
+"       :
+        : __imm(bpf_get_prandom_u32)
+        : __clobber_all);
+}
+
+/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
+ * overlap on the positive side. At instruction 3, the ranges look as follows:
+ *
+ * 0 umin=0                               umax=0xffffffffffffff00  U64_MAX
+ * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]       |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxx]                                          [xxxxxxxx|
+ * 0     smax=127                             smin=-128         -1
+ *
+ * We should therefore deduce the following new bounds:
+ *
+ * 0  u64=[0;127]                                                U64_MAX
+ * [xxxxxxxx]                                                        |
+ * |----------------------------|------------------------------|
+ * [xxxxxxxx]                                                        |
+ * 0  s64=[0;127]                                                   -1
+ *
+ * Without the deduction cross sign boundary, the program is rejected due to
+ * the frame pointer write.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, positive overlap")
+__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))")
+__retval(0)
+__naked void bounds_deduct_positive_overlap(void)
+{
+        asm volatile("                                  \
+        call %[bpf_get_prandom_u32];                    \
+        r0 = (s8)r0;                                    \
+        r1 = 0xffffffffffffff00;                        \
+        if r0 > r1 goto l0_%=;                          \
+        if r0 < 128 goto l0_%=;                         \
+        r10 = 0;                                        \
+l0_%=:  r0 = 0;                                         \
+        exit;                                           \
+"       :
+        : __imm(bpf_get_prandom_u32)
+        : __clobber_all);
+}
+
+/* This test is the same as above, but the s64 and u64 ranges overlap in two
+ * places. At instruction 3, the ranges look as follows:
+ *
+ * 0 umin=0                               umax=0xffffffffffffff80  U64_MAX
+ * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]    |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxx]                                          [xxxxxxxx|
+ * 0     smax=127                             smin=-128         -1
+ *
+ * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and
+ * the program should fail due to the frame pointer write.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, two overlaps")
+__failure __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)")
+__msg("frame pointer is read only")
+__naked void bounds_deduct_two_overlaps(void)
+{
+        asm volatile("                                  \
+        call %[bpf_get_prandom_u32];                    \
+        r0 = (s8)r0;                                    \
+        r1 = 0xffffffffffffff80;                        \
+        if r0 > r1 goto l0_%=;                          \
+        if r0 < 128 goto l0_%=;                         \
+        r10 = 0;                                        \
+l0_%=:  r0 = 0;                                         \
+        exit;                                           \
+"       :
+        : __imm(bpf_get_prandom_u32)
+        : __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
