Skip to content

Commit 128a6a1

Browse files
cupermir authored and Kernel Patches Daemon committed
bpf: verifier improvement in 32bit shift sign extension pattern
This patch improves the verifier to correctly compute bounds for the sign-extension compiler pattern composed of a left shift by 32 bits followed by an arithmetic (sign) right shift by 32 bits. The pattern handling in the verifier was limited to positive value bounds and would reset bound computation for negative values. The new code allows both positive and negative values for sign extension without compromising bound computation, allowing the verifier to pass. This change is required by GCC, which generates such a pattern; it was detected in the context of systemd, as described in the following GCC bugzilla: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=119731 Three new tests were added in verifier_subreg.c. Signed-off-by: Cupertino Miranda <[email protected]> Signed-off-by: Andrew Pinski <[email protected]> Cc: David Faust <[email protected]> Cc: Jose Marchesi <[email protected]> Cc: Elena Zannoni <[email protected]> Acked-by: Eduard Zingerman <[email protected]>
1 parent 3282beb commit 128a6a1

File tree

2 files changed

+77
-11
lines changed

2 files changed

+77
-11
lines changed

kernel/bpf/verifier.c

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -15301,21 +15301,19 @@ static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
1530115301
u64 umin_val, u64 umax_val)
1530215302
{
1530315303
/* Special case <<32 because it is a common compiler pattern to sign
15304-
* extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
15305-
* positive we know this shift will also be positive so we can track
15306-
* bounds correctly. Otherwise we lose all sign bit information except
15307-
* what we can pick up from var_off. Perhaps we can generalize this
15308-
* later to shifts of any length.
15304+
* extend subreg by doing <<32 s>>32. When the shift is below the
15305+
* sign extension (32 bits in this case), which is always true when we
15306+
* cast the s32 to s64, the result will always be a valid number
15307+
* representative of the respective shift and its bounds can be
15308+
* predicted.
1530915309
*/
15310-
if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
15310+
if (umin_val == 32 && umax_val == 32) {
1531115311
dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
15312-
else
15313-
dst_reg->smax_value = S64_MAX;
15314-
15315-
if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
1531615312
dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
15317-
else
15313+
} else {
15314+
dst_reg->smax_value = S64_MAX;
1531815315
dst_reg->smin_value = S64_MIN;
15316+
}
1531915317

1532015318
/* If we might shift our top bit out, then we know nothing */
1532115319
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {

tools/testing/selftests/bpf/progs/verifier_subreg.c

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -531,6 +531,74 @@ __naked void arsh32_imm_zero_extend_check(void)
531531
: __clobber_all);
532532
}
533533

534+
SEC("socket")
535+
__description("arsh32 imm sign positive extend check")
536+
__success __success_unpriv __retval(0)
537+
__naked void arsh32_imm_sign_extend_positive_check(void)
538+
{
539+
asm volatile (" \
540+
call %[bpf_get_prandom_u32]; \
541+
r6 = r0; \
542+
r6 &= 0xfff; \
543+
r6 <<= 32; \
544+
r6 s>>= 32; \
545+
r0 = 0; \
546+
if w6 s>= 0 goto l0_%=; \
547+
r0 /= 0; \
548+
l0_%=: if w6 s<= 4096 goto l1_%=; \
549+
r0 /= 0; \
550+
l1_%=: exit; \
551+
" :
552+
: __imm(bpf_get_prandom_u32)
553+
: __clobber_all);
554+
}
555+
556+
SEC("socket")
557+
__description("arsh32 imm sign negative extend check")
558+
__success __success_unpriv __retval(0)
559+
__naked void arsh32_imm_sign_extend_negative_check(void)
560+
{
561+
asm volatile (" \
562+
call %[bpf_get_prandom_u32]; \
563+
r6 = r0; \
564+
r6 &= 0xfff; \
565+
r6 -= 0xfff; \
566+
r6 <<= 32; \
567+
r6 s>>= 32; \
568+
r0 = 0; \
569+
if w6 s>= -4095 goto l0_%=; \
570+
r0 /= 0; \
571+
l0_%=: if w6 s<= 0 goto l1_%=; \
572+
r0 /= 0; \
573+
l1_%=: exit; \
574+
" :
575+
: __imm(bpf_get_prandom_u32)
576+
: __clobber_all);
577+
}
578+
579+
SEC("socket")
580+
__description("arsh32 imm sign extend check")
581+
__success __success_unpriv __retval(0)
582+
__naked void arsh32_imm_sign_extend_check(void)
583+
{
584+
asm volatile (" \
585+
call %[bpf_get_prandom_u32]; \
586+
r6 = r0; \
587+
r6 &= 0xfff; \
588+
r6 -= 0x7ff; \
589+
r6 <<= 32; \
590+
r6 s>>= 32; \
591+
r0 = 0; \
592+
if w6 s>= -2049 goto l0_%=; \
593+
r0 /= 0; \
594+
l0_%=: if w6 s<= 2048 goto l1_%=; \
595+
r0 /= 0; \
596+
l1_%=: exit; \
597+
" :
598+
: __imm(bpf_get_prandom_u32)
599+
: __clobber_all);
600+
}
601+
534602
SEC("socket")
535603
__description("end16 (to_le) reg zero extend check")
536604
__success __success_unpriv __retval(0)

0 commit comments

Comments (0)