Skip to content

Commit 26f4531

Browse files
committed
Daniel Borkmann says: ==================== pull-request: bpf-next 2024-07-12 We've added 23 non-merge commits during the last 3 day(s) which contain a total of 18 files changed, 234 insertions(+), 243 deletions(-). The main changes are: 1) Improve BPF verifier by utilizing overflow.h helpers to check for overflows, from Shung-Hsi Yu. 2) Fix NULL pointer dereference in resolve_prog_type() for BPF_PROG_TYPE_EXT when attr->attach_prog_fd was not specified, from Tengda Wu. 3) Fix arm64 BPF JIT when generating code for BPF trampolines with BPF_TRAMP_F_CALL_ORIG which corrupted upper address bits, from Puranjay Mohan. 4) Remove test_run callback from lwt_seg6local_prog_ops which never worked in the first place and caused syzbot reports, from Sebastian Andrzej Siewior. 5) Relax BPF verifier to accept non-zero offset on KF_TRUSTED_ARGS/KF_RCU-typed BPF kfuncs, from Matt Bobrowski. 6) Fix a long standing bug in libbpf with regards to handling of BPF skeleton's forward and backward compatibility, from Andrii Nakryiko. 7) Annotate btf_{seq,snprintf}_show functions with __printf, from Alan Maguire. 8) BPF selftest improvements to reuse common network helpers in sk_lookup test and dropping the open-coded inetaddr_len() and make_socket() ones, from Geliang Tang. 
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (23 commits) selftests/bpf: Test for null-pointer-deref bugfix in resolve_prog_type() bpf: Fix null pointer dereference in resolve_prog_type() for BPF_PROG_TYPE_EXT selftests/bpf: DENYLIST.aarch64: Skip fexit_sleep again bpf: use check_sub_overflow() to check for subtraction overflows bpf: use check_add_overflow() to check for addition overflows bpf: fix overflow check in adjust_jmp_off() bpf: Eliminate remaining "make W=1" warnings in kernel/bpf/btf.o bpf: annotate BTF show functions with __printf bpf, arm64: Fix trampoline for BPF_TRAMP_F_CALL_ORIG selftests/bpf: Close obj in error path in xdp_adjust_tail selftests/bpf: Null checks for links in bpf_tcp_ca selftests/bpf: Use connect_fd_to_fd in sk_lookup selftests/bpf: Use start_server_addr in sk_lookup selftests/bpf: Use start_server_str in sk_lookup selftests/bpf: Close fd in error path in drop_on_reuseport selftests/bpf: Add ASSERT_OK_FD macro selftests/bpf: Add backlog for network_helper_opts selftests/bpf: fix compilation failure when CONFIG_NF_FLOW_TABLE=m bpf: Remove tst_run from lwt_seg6local_prog_ops. bpf: relax zero fixed offset constraint on KF_TRUSTED_ARGS/KF_RCU ... ==================== Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents e5abd12 + e435b04 commit 26f4531

File tree

18 files changed

+234
-243
lines changed

18 files changed

+234
-243
lines changed

arch/arm64/net/bpf_jit_comp.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2147,7 +2147,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
21472147
emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
21482148

21492149
if (flags & BPF_TRAMP_F_CALL_ORIG) {
2150-
emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
2150+
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
21512151
emit_call((const u64)__bpf_tramp_enter, ctx);
21522152
}
21532153

@@ -2191,7 +2191,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
21912191

21922192
if (flags & BPF_TRAMP_F_CALL_ORIG) {
21932193
im->ip_epilogue = ctx->ro_image + ctx->idx;
2194-
emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
2194+
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
21952195
emit_call((const u64)__bpf_tramp_exit, ctx);
21962196
}
21972197

include/linux/bpf_verifier.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -856,7 +856,7 @@ static inline u32 type_flag(u32 type)
856856
/* only use after check_attach_btf_id() */
857857
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
858858
{
859-
return prog->type == BPF_PROG_TYPE_EXT ?
859+
return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
860860
prog->aux->dst_prog->type : prog->type;
861861
}
862862

kernel/bpf/btf.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -415,7 +415,7 @@ const char *btf_type_str(const struct btf_type *t)
415415
struct btf_show {
416416
u64 flags;
417417
void *target; /* target of show operation (seq file, buffer) */
418-
void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
418+
__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
419419
const struct btf *btf;
420420
/* below are used during iteration */
421421
struct {
@@ -7538,8 +7538,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
75387538
btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
75397539
}
75407540

7541-
static void btf_seq_show(struct btf_show *show, const char *fmt,
7542-
va_list args)
7541+
__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
7542+
va_list args)
75437543
{
75447544
seq_vprintf((struct seq_file *)show->target, fmt, args);
75457545
}
@@ -7572,8 +7572,8 @@ struct btf_show_snprintf {
75727572
int len; /* length we would have written */
75737573
};
75747574

7575-
static void btf_snprintf_show(struct btf_show *show, const char *fmt,
7576-
va_list args)
7575+
__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
7576+
va_list args)
75777577
{
75787578
struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
75797579
int len;

kernel/bpf/verifier.c

Lines changed: 51 additions & 129 deletions
Original file line numberDiff line numberDiff line change
@@ -11335,7 +11335,9 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
1133511335
btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
1133611336
strict_type_match = true;
1133711337

11338-
WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
11338+
WARN_ON_ONCE(is_kfunc_release(meta) &&
11339+
(reg->off || !tnum_is_const(reg->var_off) ||
11340+
reg->var_off.value));
1133911341

1134011342
reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
1134111343
reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
@@ -11917,12 +11919,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
1191711919
return -EINVAL;
1191811920
}
1191911921
}
11920-
1192111922
fallthrough;
1192211923
case KF_ARG_PTR_TO_CTX:
11923-
/* Trusted arguments have the same offset checks as release arguments */
11924-
arg_type |= OBJ_RELEASE;
11925-
break;
1192611924
case KF_ARG_PTR_TO_DYNPTR:
1192711925
case KF_ARG_PTR_TO_ITER:
1192811926
case KF_ARG_PTR_TO_LIST_HEAD:
@@ -11935,7 +11933,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
1193511933
case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
1193611934
case KF_ARG_PTR_TO_CONST_STR:
1193711935
case KF_ARG_PTR_TO_WORKQUEUE:
11938-
/* Trusted by default */
1193911936
break;
1194011937
default:
1194111938
WARN_ON_ONCE(1);
@@ -12729,56 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
1272912726
return 0;
1273012727
}
1273112728

12732-
static bool signed_add_overflows(s64 a, s64 b)
12733-
{
12734-
/* Do the add in u64, where overflow is well-defined */
12735-
s64 res = (s64)((u64)a + (u64)b);
12736-
12737-
if (b < 0)
12738-
return res > a;
12739-
return res < a;
12740-
}
12741-
12742-
static bool signed_add32_overflows(s32 a, s32 b)
12743-
{
12744-
/* Do the add in u32, where overflow is well-defined */
12745-
s32 res = (s32)((u32)a + (u32)b);
12746-
12747-
if (b < 0)
12748-
return res > a;
12749-
return res < a;
12750-
}
12751-
12752-
static bool signed_add16_overflows(s16 a, s16 b)
12753-
{
12754-
/* Do the add in u16, where overflow is well-defined */
12755-
s16 res = (s16)((u16)a + (u16)b);
12756-
12757-
if (b < 0)
12758-
return res > a;
12759-
return res < a;
12760-
}
12761-
12762-
static bool signed_sub_overflows(s64 a, s64 b)
12763-
{
12764-
/* Do the sub in u64, where overflow is well-defined */
12765-
s64 res = (s64)((u64)a - (u64)b);
12766-
12767-
if (b < 0)
12768-
return res < a;
12769-
return res > a;
12770-
}
12771-
12772-
static bool signed_sub32_overflows(s32 a, s32 b)
12773-
{
12774-
/* Do the sub in u32, where overflow is well-defined */
12775-
s32 res = (s32)((u32)a - (u32)b);
12776-
12777-
if (b < 0)
12778-
return res < a;
12779-
return res > a;
12780-
}
12781-
1278212729
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
1278312730
const struct bpf_reg_state *reg,
1278412731
enum bpf_reg_type type)
@@ -13260,21 +13207,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1326013207
* added into the variable offset, and we copy the fixed offset
1326113208
* from ptr_reg.
1326213209
*/
13263-
if (signed_add_overflows(smin_ptr, smin_val) ||
13264-
signed_add_overflows(smax_ptr, smax_val)) {
13210+
if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
13211+
check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
1326513212
dst_reg->smin_value = S64_MIN;
1326613213
dst_reg->smax_value = S64_MAX;
13267-
} else {
13268-
dst_reg->smin_value = smin_ptr + smin_val;
13269-
dst_reg->smax_value = smax_ptr + smax_val;
1327013214
}
13271-
if (umin_ptr + umin_val < umin_ptr ||
13272-
umax_ptr + umax_val < umax_ptr) {
13215+
if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
13216+
check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
1327313217
dst_reg->umin_value = 0;
1327413218
dst_reg->umax_value = U64_MAX;
13275-
} else {
13276-
dst_reg->umin_value = umin_ptr + umin_val;
13277-
dst_reg->umax_value = umax_ptr + umax_val;
1327813219
}
1327913220
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
1328013221
dst_reg->off = ptr_reg->off;
@@ -13317,14 +13258,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1331713258
/* A new variable offset is created. If the subtrahend is known
1331813259
* nonnegative, then any reg->range we had before is still good.
1331913260
*/
13320-
if (signed_sub_overflows(smin_ptr, smax_val) ||
13321-
signed_sub_overflows(smax_ptr, smin_val)) {
13261+
if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
13262+
check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
1332213263
/* Overflow possible, we know nothing */
1332313264
dst_reg->smin_value = S64_MIN;
1332413265
dst_reg->smax_value = S64_MAX;
13325-
} else {
13326-
dst_reg->smin_value = smin_ptr - smax_val;
13327-
dst_reg->smax_value = smax_ptr - smin_val;
1332813266
}
1332913267
if (umin_ptr < umax_val) {
1333013268
/* Overflow possible, we know nothing */
@@ -13377,71 +13315,56 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1337713315
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
1337813316
struct bpf_reg_state *src_reg)
1337913317
{
13380-
s32 smin_val = src_reg->s32_min_value;
13381-
s32 smax_val = src_reg->s32_max_value;
13382-
u32 umin_val = src_reg->u32_min_value;
13383-
u32 umax_val = src_reg->u32_max_value;
13318+
s32 *dst_smin = &dst_reg->s32_min_value;
13319+
s32 *dst_smax = &dst_reg->s32_max_value;
13320+
u32 *dst_umin = &dst_reg->u32_min_value;
13321+
u32 *dst_umax = &dst_reg->u32_max_value;
1338413322

13385-
if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
13386-
signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
13387-
dst_reg->s32_min_value = S32_MIN;
13388-
dst_reg->s32_max_value = S32_MAX;
13389-
} else {
13390-
dst_reg->s32_min_value += smin_val;
13391-
dst_reg->s32_max_value += smax_val;
13323+
if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
13324+
check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
13325+
*dst_smin = S32_MIN;
13326+
*dst_smax = S32_MAX;
1339213327
}
13393-
if (dst_reg->u32_min_value + umin_val < umin_val ||
13394-
dst_reg->u32_max_value + umax_val < umax_val) {
13395-
dst_reg->u32_min_value = 0;
13396-
dst_reg->u32_max_value = U32_MAX;
13397-
} else {
13398-
dst_reg->u32_min_value += umin_val;
13399-
dst_reg->u32_max_value += umax_val;
13328+
if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
13329+
check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
13330+
*dst_umin = 0;
13331+
*dst_umax = U32_MAX;
1340013332
}
1340113333
}
1340213334

1340313335
static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
1340413336
struct bpf_reg_state *src_reg)
1340513337
{
13406-
s64 smin_val = src_reg->smin_value;
13407-
s64 smax_val = src_reg->smax_value;
13408-
u64 umin_val = src_reg->umin_value;
13409-
u64 umax_val = src_reg->umax_value;
13338+
s64 *dst_smin = &dst_reg->smin_value;
13339+
s64 *dst_smax = &dst_reg->smax_value;
13340+
u64 *dst_umin = &dst_reg->umin_value;
13341+
u64 *dst_umax = &dst_reg->umax_value;
1341013342

13411-
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
13412-
signed_add_overflows(dst_reg->smax_value, smax_val)) {
13413-
dst_reg->smin_value = S64_MIN;
13414-
dst_reg->smax_value = S64_MAX;
13415-
} else {
13416-
dst_reg->smin_value += smin_val;
13417-
dst_reg->smax_value += smax_val;
13343+
if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
13344+
check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
13345+
*dst_smin = S64_MIN;
13346+
*dst_smax = S64_MAX;
1341813347
}
13419-
if (dst_reg->umin_value + umin_val < umin_val ||
13420-
dst_reg->umax_value + umax_val < umax_val) {
13421-
dst_reg->umin_value = 0;
13422-
dst_reg->umax_value = U64_MAX;
13423-
} else {
13424-
dst_reg->umin_value += umin_val;
13425-
dst_reg->umax_value += umax_val;
13348+
if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
13349+
check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
13350+
*dst_umin = 0;
13351+
*dst_umax = U64_MAX;
1342613352
}
1342713353
}
1342813354

1342913355
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
1343013356
struct bpf_reg_state *src_reg)
1343113357
{
13432-
s32 smin_val = src_reg->s32_min_value;
13433-
s32 smax_val = src_reg->s32_max_value;
13358+
s32 *dst_smin = &dst_reg->s32_min_value;
13359+
s32 *dst_smax = &dst_reg->s32_max_value;
1343413360
u32 umin_val = src_reg->u32_min_value;
1343513361
u32 umax_val = src_reg->u32_max_value;
1343613362

13437-
if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
13438-
signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
13363+
if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
13364+
check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
1343913365
/* Overflow possible, we know nothing */
13440-
dst_reg->s32_min_value = S32_MIN;
13441-
dst_reg->s32_max_value = S32_MAX;
13442-
} else {
13443-
dst_reg->s32_min_value -= smax_val;
13444-
dst_reg->s32_max_value -= smin_val;
13366+
*dst_smin = S32_MIN;
13367+
*dst_smax = S32_MAX;
1344513368
}
1344613369
if (dst_reg->u32_min_value < umax_val) {
1344713370
/* Overflow possible, we know nothing */
@@ -13457,19 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
1345713380
static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
1345813381
struct bpf_reg_state *src_reg)
1345913382
{
13460-
s64 smin_val = src_reg->smin_value;
13461-
s64 smax_val = src_reg->smax_value;
13383+
s64 *dst_smin = &dst_reg->smin_value;
13384+
s64 *dst_smax = &dst_reg->smax_value;
1346213385
u64 umin_val = src_reg->umin_value;
1346313386
u64 umax_val = src_reg->umax_value;
1346413387

13465-
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
13466-
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
13388+
if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
13389+
check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
1346713390
/* Overflow possible, we know nothing */
13468-
dst_reg->smin_value = S64_MIN;
13469-
dst_reg->smax_value = S64_MAX;
13470-
} else {
13471-
dst_reg->smin_value -= smax_val;
13472-
dst_reg->smax_value -= smin_val;
13391+
*dst_smin = S64_MIN;
13392+
*dst_smax = S64_MAX;
1347313393
}
1347413394
if (dst_reg->umin_value < umax_val) {
1347513395
/* Overflow possible, we know nothing */
@@ -18838,6 +18758,8 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
1883818758
{
1883918759
struct bpf_insn *insn = prog->insnsi;
1884018760
u32 insn_cnt = prog->len, i;
18761+
s32 imm;
18762+
s16 off;
1884118763

1884218764
for (i = 0; i < insn_cnt; i++, insn++) {
1884318765
u8 code = insn->code;
@@ -18849,15 +18771,15 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
1884918771
if (insn->code == (BPF_JMP32 | BPF_JA)) {
1885018772
if (i + 1 + insn->imm != tgt_idx)
1885118773
continue;
18852-
if (signed_add32_overflows(insn->imm, delta))
18774+
if (check_add_overflow(insn->imm, delta, &imm))
1885318775
return -ERANGE;
18854-
insn->imm += delta;
18776+
insn->imm = imm;
1885518777
} else {
1885618778
if (i + 1 + insn->off != tgt_idx)
1885718779
continue;
18858-
if (signed_add16_overflows(insn->imm, delta))
18780+
if (check_add_overflow(insn->off, delta, &off))
1885918781
return -ERANGE;
18860-
insn->off += delta;
18782+
insn->off = off;
1886118783
}
1886218784
}
1886318785
return 0;

net/core/filter.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11053,7 +11053,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
1105311053
};
1105411054

1105511055
const struct bpf_prog_ops lwt_seg6local_prog_ops = {
11056-
.test_run = bpf_prog_test_run_skb,
1105711056
};
1105811057

1105911058
const struct bpf_verifier_ops cg_sock_verifier_ops = {

0 commit comments

Comments
 (0)