@@ -2523,6 +2523,58 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
25232523 if ((u64)reg->smin_value <= (u64)reg->smax_value) {
25242524 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
25252525 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
2526+ } else {
2527+ /* If the s64 range crosses the sign boundary, then it's split
2528+ * between the beginning and end of the U64 domain. In that
2529+ * case, we can derive new bounds if the u64 range overlaps
2530+ * with only one end of the s64 range.
2531+ *
2532+	 * In the following example, the u64 range overlaps only with
2533+	 * the positive portion of the s64 range.
2534+ *
2535+ * 0 U64_MAX
2536+ * | [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx] |
2537+ * |----------------------------|----------------------------|
2538+ * |xxxxx s64 range xxxxxxxxx] [xxxxxxx|
2539+ * 0 S64_MAX S64_MIN -1
2540+ *
2541+ * We can thus derive the following new s64 and u64 ranges.
2542+ *
2543+ * 0 U64_MAX
2544+ * | [xxxxxx u64 range xxxxx] |
2545+ * |----------------------------|----------------------------|
2546+ * | [xxxxxx s64 range xxxxx] |
2547+ * 0 S64_MAX S64_MIN -1
2548+ *
2549+	 * If they overlap in two places, we can't derive anything
2550+	 * because struct bpf_reg_state can't represent two ranges per
2551+	 * numeric domain.
2552+ *
2553+ * 0 U64_MAX
2554+ * | [xxxxxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxxxxx] |
2555+ * |----------------------------|----------------------------|
2556+ * |xxxxx s64 range xxxxxxxxx] [xxxxxxxxxx|
2557+ * 0 S64_MAX S64_MIN -1
2558+ *
2559+ * The first condition below corresponds to the first diagram
2560+ * above.
2561+ */
2562+ if (reg->umax_value < (u64)reg->smin_value) {
2563+ reg->smin_value = (s64)reg->umin_value;
2564+ reg->umax_value = min_t(u64, reg->umax_value, reg->smax_value);
2565+ } else if ((u64)reg->smax_value < reg->umin_value) {
2566+ /* This second condition considers the case where the u64 range
2567+ * overlaps with the negative portion of the s64 range:
2568+ *
2569+ * 0 U64_MAX
2570+ * | [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx] |
2571+ * |----------------------------|----------------------------|
2572+ * |xxxxxxxxx] [xxxxxxxxxxxx s64 range |
2573+ * 0 S64_MAX S64_MIN -1
2574+ */
2575+ reg->smax_value = (s64)reg->umax_value;
2576+ reg->umin_value = max_t(u64, reg->umin_value, reg->smin_value);
2577+ }
25262578 }
25272579}
25282580
@@ -2554,20 +2606,6 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
25542606 reg->smin_value = max_t(s64, reg->smin_value, new_smin);
25552607 reg->smax_value = min_t(s64, reg->smax_value, new_smax);
25562608
2557- /* if s32 can be treated as valid u32 range, we can use it as well */
2558- if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
2559- /* s32 -> u64 tightening */
2560- new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
2561- new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
2562- reg->umin_value = max_t(u64, reg->umin_value, new_umin);
2563- reg->umax_value = min_t(u64, reg->umax_value, new_umax);
2564- /* s32 -> s64 tightening */
2565- new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
2566- new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
2567- reg->smin_value = max_t(s64, reg->smin_value, new_smin);
2568- reg->smax_value = min_t(s64, reg->smax_value, new_smax);
2569- }
2570-
25712609 /* Here we would like to handle a special case after sign extending load,
25722610 * when upper bits for a 64-bit range are all 1s or all 0s.
25732611 *
@@ -2634,6 +2672,7 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
26342672 /* We might have learned something about the sign bit. */
26352673 __reg_deduce_bounds(reg);
26362674 __reg_deduce_bounds(reg);
2675+ __reg_deduce_bounds(reg);
26372676 /* We might have learned some bits from the bounds. */
26382677 __reg_bound_offset(reg);
26392678 /* Intersecting with the old var_off might have improved our bounds
@@ -4518,7 +4557,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
45184557 * . if (scalar cond K|scalar)
45194558 * . helper_call(.., scalar, ...) where ARG_CONST is expected
45204559 * backtrack through the verifier states and mark all registers and
4521- * stack slots with spilled constants that these scalar regisers
4560+ * stack slots with spilled constants that these scalar registers
45224561 * should be precise.
45234562 * . during state pruning two registers (or spilled stack slots)
45244563 * are equivalent if both are not precise.
@@ -18450,7 +18489,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
1845018489/* the parentage chains form a tree.
1845118490 * the verifier states are added to state lists at given insn and
1845218491 * pushed into state stack for future exploration.
18453- * when the verifier reaches bpf_exit insn some of the verifer states
18492+ * when the verifier reaches bpf_exit insn some of the verifier states
1845418493 * stored in the state lists have their final liveness state already,
1845518494 * but a lot of states will get revised from liveness point of view when
1845618495 * the verifier explores other branches.
@@ -19166,7 +19205,7 @@ static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
1916619205 * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
1916719206 * states to match, which otherwise would look like an infinite loop. So while
1916819207 * iter_next() calls are taken care of, we still need to be careful and
19169- * prevent erroneous and too eager declaration of "ininite loop", when
19208+ * prevent erroneous and too eager declaration of "infinite loop", when
1917019209 * iterators are involved.
1917119210 *
1917219211 * Here's a situation in pseudo-BPF assembly form:
@@ -19208,7 +19247,7 @@ static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
1920819247 *
1920919248 * This approach allows to keep infinite loop heuristic even in the face of
1921019249 * active iterator. E.g., C snippet below is and will be detected as
19211- * inifintely looping:
19250+ * infinitely looping:
1921219251 *
1921319252 * struct bpf_iter_num it;
1921419253 * int *p, x;
@@ -24449,7 +24488,7 @@ static int compute_scc(struct bpf_verifier_env *env)
2444924488 * if pre[i] == 0:
2445024489 * recur(i)
2445124490 *
24452- * Below implementation replaces explicit recusion with array 'dfs'.
24491+ * Below implementation replaces explicit recursion with array 'dfs'.
2445324492 */
2445424493 for (i = 0; i < insn_cnt; i++) {
2445524494 if (pre[i])
0 commit comments