18 changes: 7 additions & 11 deletions kernel/bpf/verifier.c
@@ -15301,21 +15301,17 @@ static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
 					 u64 umin_val, u64 umax_val)
 {
 	/* Special case <<32 because it is a common compiler pattern to sign
-	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
-	 * positive we know this shift will also be positive so we can track
-	 * bounds correctly. Otherwise we lose all sign bit information except
-	 * what we can pick up from var_off. Perhaps we can generalize this
-	 * later to shifts of any length.
+	 * extend subreg by doing <<32 s>>32. smin/smax assignments are correct
+	 * because s32 bounds don't flip sign when shifting to the left by
+	 * 32bits.
 	 */
-	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
+	if (umin_val == 32 && umax_val == 32) {
 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
-	else
-		dst_reg->smax_value = S64_MAX;
-
-	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
-	else
+	} else {
+		dst_reg->smax_value = S64_MAX;
 		dst_reg->smin_value = S64_MIN;
+	}
 
 	/* If we might shift our top bit out, then we know nothing */
 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
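For context only (not part of the diff): a minimal userspace sketch of the arithmetic the rewritten branch relies on. Shifting the 32-bit signed bounds left by 32 preserves their sign and ordering, so the 64-bit signed bounds can be taken directly from them. The helper name scalar64_bounds_lsh32() is made up for illustration and only mirrors the bound update performed in __scalar64_min_max_lsh(); like the kernel expression, it relies on the usual two's-complement behavior of left-shifting a negative value.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the bound update in __scalar64_min_max_lsh()
 * when the shift amount is known to be exactly 32: the 64-bit signed
 * bounds are derived from the 32-bit signed bounds, since
 * (s64)s32_val << 32 keeps the sign of s32_val.
 */
static void scalar64_bounds_lsh32(int32_t s32_min, int32_t s32_max,
				  int64_t *smin, int64_t *smax)
{
	*smin = (int64_t)s32_min << 32;
	*smax = (int64_t)s32_max << 32;
}

int main(void)
{
	int64_t smin, smax;

	/* Bounds from the "arsh32 imm sign extend check" test below:
	 * after "r6 &= 4095; r6 -= 2047" the 32-bit range is [-2047, 2048].
	 */
	scalar64_bounds_lsh32(-2047, 2048, &smin, &smax);

	/* Prints smin=0xfffff80100000000 smax=0x80000000000, matching the
	 * verifier log expected by the selftest.
	 */
	printf("smin=%#llx smax=%#llx\n",
	       (unsigned long long)smin, (unsigned long long)smax);
	return 0;
}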
70 changes: 70 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_subreg.c
@@ -531,6 +531,76 @@ __naked void arsh32_imm_zero_extend_check(void)
: __clobber_all);
}

SEC("socket")
__description("arsh32 imm sign positive extend check")
__success __retval(0)
__naked void arsh32_imm_sign_extend_positive_check(void)
__log_level(2)
__msg("2: (57) r6 &= 4095 ; R6=scalar(smin=smin32=0,smax=umax=smax32=umax32=4095,var_off=(0x0; 0xfff))")
__msg("3: (67) r6 <<= 32 ; R6=scalar(smin=smin32=0,smax=umax=0xfff00000000,smax32=umax32=0,var_off=(0x0; 0xfff00000000))")
__msg("4: (c7) r6 s>>= 32 ; R6=scalar(smin=smin32=0,smax=umax=smax32=umax32=4095,var_off=(0x0; 0xfff))")

{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 &= 4095; \
r6 <<= 32; \
r6 s>>= 32; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}

SEC("socket")
__description("arsh32 imm sign negative extend check")
__success __retval(0)
__naked void arsh32_imm_sign_extend_negative_check(void)
__log_level(2)
__msg("3: (17) r6 -= 4095 ; R6=scalar(smin=smin32=-4095,smax=smax32=0)")
__msg("4: (67) r6 <<= 32 ; R6=scalar(smin=0xfffff00100000000,smax=smax32=umax32=0,umax=0xffffffff00000000,smin32=0,var_off=(0x0; 0xffffffff00000000))")
__msg("5: (c7) r6 s>>= 32 ; R6=scalar(smin=smin32=-4095,smax=smax32=0)")

{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 &= 4095; \
r6 -= 4095; \
r6 <<= 32; \
r6 s>>= 32; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}

SEC("socket")
__description("arsh32 imm sign extend check")
__success __retval(0)
__naked void arsh32_imm_sign_extend_check(void)
__log_level(2)
__msg("3: (17) r6 -= 2047 ; R6=scalar(smin=smin32=-2047,smax=smax32=2048)")
__msg("4: (67) r6 <<= 32 ; R6=scalar(smin=0xfffff80100000000,smax=0x80000000000,umax=0xffffffff00000000,smin32=0,smax32=umax32=0,var_off=(0x0; 0xffffffff00000000))")
__msg("5: (c7) r6 s>>= 32 ; R6=scalar(smin=smin32=-2047,smax=smax32=2048)")
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 &= 4095; \
r6 -= 2047; \
r6 <<= 32; \
r6 s>>= 32; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}

SEC("socket")
__description("end16 (to_le) reg zero extend check")
__success __success_unpriv __retval(0)