Skip to content

Commit 2ed6cc7

Browse files
mannkafai authored and
Kernel Patches Daemon committed
bpf: Skip bounds adjustment for conditional jumps on same scalar register
When conditional jumps are performed on the same scalar register (e.g., r0 <= r0, r0 > r0, r0 < r0), the BPF verifier incorrectly attempts to adjust the register's min/max bounds. This leads to invalid range bounds and triggers a BUG warning. The problematic BPF program: 0: call bpf_get_prandom_u32 1: w8 = 0x80000000 2: r0 &= r8 3: if r0 > r0 goto <exit> The instruction 3 triggers kernel warning: 3: if r0 > r0 goto <exit> true_reg1: range bounds violation u64=[0x1, 0x0] s64=[0x1, 0x0] u32=[0x1, 0x0] s32=[0x1, 0x0] var_off=(0x0, 0x0) true_reg2: const tnum out of sync with range bounds u64=[0x0, 0xffffffffffffffff] s64=[0x8000000000000000, 0x7fffffffffffffff] var_off=(0x0, 0x0) Comparing a register with itself should not change its bounds and for most comparison operations, comparing a register with itself has a known result (e.g., r0 == r0 is always true, r0 < r0 is always false). Fix this by: 1. Enhance is_scalar_branch_taken() to properly handle branch direction computation for same register comparisons across all BPF jump operations 2. Adds early return in reg_set_min_max() to avoid bounds adjustment for unknown branch directions (e.g., BPF_JSET) on the same register The fix ensures that unnecessary bounds adjustments are skipped, preventing the verifier bug while maintaining correct branch direction analysis. Reported-by: Kaiyan Mei <[email protected]> Reported-by: Yinhao Hu <[email protected]> Closes: https://lore.kernel.org/all/[email protected]/ Signed-off-by: KaFai Wan <[email protected]>
1 parent 9a71dd4 commit 2ed6cc7

File tree

1 file changed

+33
-0
lines changed

1 file changed

+33
-0
lines changed

kernel/bpf/verifier.c

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15995,6 +15995,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
1599515995

1599615996
switch (opcode) {
1599715997
case BPF_JEQ:
15998+
if (reg1 == reg2)
15999+
return 1;
1599816000
/* constants, umin/umax and smin/smax checks would be
1599916001
* redundant in this case because they all should match
1600016002
*/
@@ -16021,6 +16023,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
1602116023
}
1602216024
break;
1602316025
case BPF_JNE:
16026+
if (reg1 == reg2)
16027+
return 0;
1602416028
/* constants, umin/umax and smin/smax checks would be
1602516029
* redundant in this case because they all should match
1602616030
*/
@@ -16047,6 +16051,12 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
1604716051
}
1604816052
break;
1604916053
case BPF_JSET:
16054+
if (reg1 == reg2) {
16055+
if (tnum_is_const(t1))
16056+
return t1.value != 0;
16057+
else
16058+
return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
16059+
}
1605016060
if (!is_reg_const(reg2, is_jmp32)) {
1605116061
swap(reg1, reg2);
1605216062
swap(t1, t2);
@@ -16059,48 +16069,64 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
1605916069
return 0;
1606016070
break;
1606116071
case BPF_JGT:
16072+
if (reg1 == reg2)
16073+
return 0;
1606216074
if (umin1 > umax2)
1606316075
return 1;
1606416076
else if (umax1 <= umin2)
1606516077
return 0;
1606616078
break;
1606716079
case BPF_JSGT:
16080+
if (reg1 == reg2)
16081+
return 0;
1606816082
if (smin1 > smax2)
1606916083
return 1;
1607016084
else if (smax1 <= smin2)
1607116085
return 0;
1607216086
break;
1607316087
case BPF_JLT:
16088+
if (reg1 == reg2)
16089+
return 0;
1607416090
if (umax1 < umin2)
1607516091
return 1;
1607616092
else if (umin1 >= umax2)
1607716093
return 0;
1607816094
break;
1607916095
case BPF_JSLT:
16096+
if (reg1 == reg2)
16097+
return 0;
1608016098
if (smax1 < smin2)
1608116099
return 1;
1608216100
else if (smin1 >= smax2)
1608316101
return 0;
1608416102
break;
1608516103
case BPF_JGE:
16104+
if (reg1 == reg2)
16105+
return 1;
1608616106
if (umin1 >= umax2)
1608716107
return 1;
1608816108
else if (umax1 < umin2)
1608916109
return 0;
1609016110
break;
1609116111
case BPF_JSGE:
16112+
if (reg1 == reg2)
16113+
return 1;
1609216114
if (smin1 >= smax2)
1609316115
return 1;
1609416116
else if (smax1 < smin2)
1609516117
return 0;
1609616118
break;
1609716119
case BPF_JLE:
16120+
if (reg1 == reg2)
16121+
return 1;
1609816122
if (umax1 <= umin2)
1609916123
return 1;
1610016124
else if (umin1 > umax2)
1610116125
return 0;
1610216126
break;
1610316127
case BPF_JSLE:
16128+
if (reg1 == reg2)
16129+
return 1;
1610416130
if (smax1 <= smin2)
1610516131
return 1;
1610616132
else if (smin1 > smax2)
@@ -16439,6 +16465,13 @@ static int reg_set_min_max(struct bpf_verifier_env *env,
1643916465
if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
1644016466
return 0;
1644116467

16468+
/* We compute branch direction for same SCALAR_VALUE registers in
16469+
* is_scalar_branch_taken(). For unknown branch directions (e.g., BPF_JSET)
16470+
* on the same registers, we don't need to adjust the min/max values.
16471+
*/
16472+
if (false_reg1 == false_reg2)
16473+
return 0;
16474+
1644216475
/* fallthrough (FALSE) branch */
1644316476
regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
1644416477
reg_bounds_sync(false_reg1);

0 commit comments

Comments
 (0)