@@ -2523,6 +2523,58 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
 	if ((u64)reg->smin_value <= (u64)reg->smax_value) {
 		reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
 		reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
+	} else {
+		/* If the s64 range crosses the sign boundary, then it's split
+		 * between the beginning and end of the U64 domain. In that
+		 * case, we can derive new bounds if the u64 range overlaps
+		 * with only one end of the s64 range.
+		 *
+		 * In the following example, the u64 range overlaps only with
+		 * positive portion of the s64 range.
+		 *
+		 * 0                                                   U64_MAX
+		 * |  [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx]              |
+		 * |----------------------------|----------------------------|
+		 * |xxxxx s64 range xxxxxxxxx]                      [xxxxxxx|
+		 * 0                     S64_MAX  S64_MIN                  -1
+		 *
+		 * We can thus derive the following new s64 and u64 ranges.
+		 *
+		 * 0                                                   U64_MAX
+		 * |   [xxxxxx u64 range xxxxx]                              |
+		 * |----------------------------|----------------------------|
+		 * |   [xxxxxx s64 range xxxxx]                              |
+		 * 0                     S64_MAX  S64_MIN                  -1
+		 *
+		 * If they overlap in two places, we can't derive anything
+		 * because reg_state can't represent two ranges per numeric
+		 * domain.
+		 *
+		 * 0                                                   U64_MAX
+		 * |  [xxxxxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxxxxx]        |
+		 * |----------------------------|----------------------------|
+		 * |xxxxx s64 range xxxxxxxxx]                    [xxxxxxxxxx|
+		 * 0                     S64_MAX  S64_MIN                  -1
+		 *
+		 * The first condition below corresponds to the first diagram
+		 * above.
+		 */
+		if (reg->umax_value < (u64)reg->smin_value) {
+			reg->smin_value = (s64)reg->umin_value;
+			reg->umax_value = min_t(u64, reg->umax_value, reg->smax_value);
+		} else if ((u64)reg->smax_value < reg->umin_value) {
+			/* This second condition considers the case where the u64 range
+			 * overlaps with the negative portion of the s64 range:
+			 *
+			 * 0                                                   U64_MAX
+			 * |            [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx]    |
+			 * |----------------------------|----------------------------|
+			 * |xxxxxxxxx]                   [xxxxxxxxxxxx s64 range     |
+			 * 0                     S64_MAX  S64_MIN                  -1
+			 */
+			reg->smax_value = (s64)reg->umax_value;
+			reg->umin_value = max_t(u64, reg->umin_value, reg->smin_value);
+		}
 	}
 }
 
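Seen outside the verifier, the new else-branch is just an interval intersection between two views of the same 64-bit value. Below is a minimal userspace sketch of that logic with a worked example; `struct range` and `deduce_wrapped` are hypothetical names, not kernel API:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the four bounds fields in bpf_reg_state */
struct range { int64_t smin, smax; uint64_t umin, umax; };

static void deduce_wrapped(struct range *r)
{
	if ((uint64_t)r->smin <= (uint64_t)r->smax)
		return;	/* s64 range does not wrap; handled by the other branch */
	if (r->umax < (uint64_t)r->smin) {
		/* u64 range overlaps only the non-negative s64 portion */
		r->smin = (int64_t)r->umin;
		if (r->umax > (uint64_t)r->smax)
			r->umax = (uint64_t)r->smax;
	} else if ((uint64_t)r->smax < r->umin) {
		/* u64 range overlaps only the negative s64 portion */
		r->smax = (int64_t)r->umax;
		if (r->umin < (uint64_t)r->smin)
			r->umin = (uint64_t)r->smin;
	}
}

int main(void)
{
	/* s64 in [-5, 5] wraps when viewed as u64; u64 in [0, 3] touches
	 * only the non-negative portion, so smin can be raised to umin.
	 */
	struct range r = { .smin = -5, .smax = 5, .umin = 0, .umax = 3 };

	deduce_wrapped(&r);
	printf("smin=%lld smax=%lld umin=%llu umax=%llu\n",
	       (long long)r.smin, (long long)r.smax,
	       (unsigned long long)r.umin, (unsigned long long)r.umax);
	/* prints: smin=0 smax=5 umin=0 umax=3 */
	return 0;
}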
@@ -2554,20 +2606,6 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
 	reg->smin_value = max_t(s64, reg->smin_value, new_smin);
 	reg->smax_value = min_t(s64, reg->smax_value, new_smax);
 
-	/* if s32 can be treated as valid u32 range, we can use it as well */
-	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
-		/* s32 -> u64 tightening */
-		new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
-		new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
-		reg->umin_value = max_t(u64, reg->umin_value, new_umin);
-		reg->umax_value = min_t(u64, reg->umax_value, new_umax);
-		/* s32 -> s64 tightening */
-		new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
-		new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
-		reg->smin_value = max_t(s64, reg->smin_value, new_smin);
-		reg->smax_value = min_t(s64, reg->smax_value, new_smax);
-	}
-
 	/* Here we would like to handle a special case after sign extending load,
 	 * when upper bits for a 64-bit range are all 1s or all 0s.
 	 *
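The block removed above worked by bit-splicing: substitute the known, non-wrapping s32 bound into the low 32 bits of the existing 64-bit bound, keep the upper 32 bits, and let max_t()/min_t() reject the candidate if it does not actually tighten anything. A small standalone illustration of the splice itself (hypothetical values, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical register state: a u64 lower bound whose upper
	 * 32 bits are 0x1, plus a non-wrapping s32 range [5, 10].
	 */
	uint64_t umin = 0x100000000ULL;
	int32_t s32_min = 5;

	/* The same splice the removed lines performed: keep the upper
	 * 32 bits of the u64 bound, substitute the s32 bound into the
	 * low 32 bits.
	 */
	uint64_t new_umin = (umin & ~0xffffffffULL) | (uint32_t)s32_min;

	printf("new_umin = 0x%llx\n", (unsigned long long)new_umin);
	/* prints: new_umin = 0x100000005 */
	return 0;
}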
@@ -2634,6 +2672,7 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
 	/* We might have learned something about the sign bit. */
 	__reg_deduce_bounds(reg);
 	__reg_deduce_bounds(reg);
+	__reg_deduce_bounds(reg);
 	/* We might have learned some bits from the bounds. */
 	__reg_bound_offset(reg);
 	/* Intersecting with the old var_off might have improved our bounds
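The added third call reads naturally as a bounded fixpoint iteration: each __reg_deduce_bounds() pass can move a fact between numeric domains, a fact learned in one pass may enable further tightening in the next, and the kernel unrolls enough passes for the deductions to settle rather than looping. A toy standalone model of the iterate-until-stable idea (deduce_once() is a deliberately simplified two-rule step, not the kernel's deduction):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct bounds { int64_t smin, smax; uint64_t umin, umax; };

static void deduce_once(struct bounds *b)
{
	/* s64 -> u64: a non-wrapping s64 range also bounds the u64 range */
	if ((uint64_t)b->smin <= (uint64_t)b->smax) {
		if (b->umin < (uint64_t)b->smin)
			b->umin = (uint64_t)b->smin;
		if (b->umax > (uint64_t)b->smax)
			b->umax = (uint64_t)b->smax;
	}
	/* u64 -> s64: a u64 range below S64_MAX also bounds the s64 range */
	if (b->umax <= (uint64_t)INT64_MAX) {
		if (b->smin < (int64_t)b->umin)
			b->smin = (int64_t)b->umin;
		if (b->smax > (int64_t)b->umax)
			b->smax = (int64_t)b->umax;
	}
}

int main(void)
{
	struct bounds b = { .smin = 2, .smax = 100, .umin = 0, .umax = 50 }, prev;
	int passes = 0;

	/* Iterate until nothing changes; the kernel instead unrolls a
	 * fixed, small number of calls.
	 */
	do {
		prev = b;
		deduce_once(&b);
		passes++;
	} while (memcmp(&prev, &b, sizeof(b)) != 0);
	printf("fixpoint after %d passes: smin=%lld smax=%lld umin=%llu umax=%llu\n",
	       passes, (long long)b.smin, (long long)b.smax,
	       (unsigned long long)b.umin, (unsigned long long)b.umax);
	/* prints: fixpoint after 2 passes: smin=2 smax=50 umin=2 umax=50 */
	return 0;
}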
@@ -4518,7 +4557,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
  * . if (scalar cond K|scalar)
  * . helper_call(.., scalar, ...) where ARG_CONST is expected
  * backtrack through the verifier states and mark all registers and
- * stack slots with spilled constants that these scalar regisers
+ * stack slots with spilled constants that these scalar registers
  * should be precise.
  * . during state pruning two registers (or spilled stack slots)
  * are equivalent if both are not precise.
@@ -18450,7 +18489,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
 /* the parentage chains form a tree.
  * the verifier states are added to state lists at given insn and
  * pushed into state stack for future exploration.
- * when the verifier reaches bpf_exit insn some of the verifer states
+ * when the verifier reaches bpf_exit insn some of the verifier states
  * stored in the state lists have their final liveness state already,
  * but a lot of states will get revised from liveness point of view when
  * the verifier explores other branches.
@@ -19166,7 +19205,7 @@ static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
  * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
  * states to match, which otherwise would look like an infinite loop. So while
  * iter_next() calls are taken care of, we still need to be careful and
- * prevent erroneous and too eager declaration of "ininite loop", when
+ * prevent erroneous and too eager declaration of "infinite loop", when
  * iterators are involved.
  *
  * Here's a situation in pseudo-BPF assembly form:
@@ -19208,7 +19247,7 @@ static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
  *
  * This approach allows to keep infinite loop heuristic even in the face of
  * active iterator. E.g., C snippet below is and will be detected as
- * inifintely looping:
+ * infinitely looping:
  *
  *   struct bpf_iter_num it;
  *   int *p, x;
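For orientation, a complete BPF program of the shape this comment is building toward might look like the sketch below. It is hypothetical: the comment's own snippet continues past this hunk's context window, and the __ksym declarations assume the usual selftests-style setup for the bpf_iter_num kfuncs.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Numeric-iterator kfuncs, declared the way the BPF selftests do */
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;

SEC("raw_tp")
int iter_with_infinite_inner_loop(void *ctx)
{
	struct bpf_iter_num it;
	int *p;

	bpf_iter_num_new(&it, 0, 10);
	while ((p = bpf_iter_num_next(&it))) {
		/* The iterator loop itself converges, but this inner loop
		 * makes no progress, so the verifier must still flag the
		 * program as infinitely looping despite the active iterator.
		 */
		for (;;)
			;
	}
	bpf_iter_num_destroy(&it);
	return 0;
}

char _license[] SEC("license") = "GPL";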
@@ -24449,7 +24488,7 @@ static int compute_scc(struct bpf_verifier_env *env)
 	 *   if pre[i] == 0:
 	 *     recur(i)
 	 *
-	 * Below implementation replaces explicit recusion with array 'dfs'.
+	 * Below implementation replaces explicit recursion with array 'dfs'.
 	 */
 	for (i = 0; i < insn_cnt; i++) {
 		if (pre[i])
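The rewrite the comment describes follows the standard recipe: the implicit recursion stack becomes an explicit array of nodes still being expanded. A generic, self-contained sketch of that transformation (plain preorder DFS over a toy graph, not the verifier's actual SCC code, which also keeps per-node edge state so its visit order matches the recursive version exactly):

#include <stdio.h>

#define N 5

/* adj[i][j] != 0 means an edge i -> j in a toy instruction graph */
static const int adj[N][N] = {
	{0, 1, 0, 0, 0},
	{0, 0, 1, 0, 0},
	{0, 0, 0, 1, 1},
	{0, 0, 0, 0, 0},
	{0, 0, 0, 0, 0},
};

int main(void)
{
	int pre[N] = {0};	/* preorder visit marks, as in the comment */
	int dfs[N];		/* explicit stack replacing recur()        */
	int top, i, j;

	for (i = 0; i < N; i++) {
		if (pre[i])
			continue;
		top = 0;
		pre[i] = 1;
		dfs[top++] = i;
		while (top) {
			int cur = dfs[--top];

			printf("visit %d\n", cur);
			for (j = 0; j < N; j++) {
				/* each node is pushed at most once, so the
				 * dfs array never outgrows N entries
				 */
				if (adj[cur][j] && !pre[j]) {
					pre[j] = 1;
					dfs[top++] = j;
				}
			}
		}
	}
	return 0;
}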