
Commit 17566cf

Authored and committed by Alexei Starovoitov
Merge branch 'fix-sleepable-context-tracking-for-async-callbacks'
Kumar Kartikeya Dwivedi says:

====================
Fix sleepable context tracking for async callbacks

Currently, asynchronous execution primitives set up their callback execution simulation using push_async_cb, which will end up inheriting the sleepable or non-sleepable bit from the program triggering the simulation of the callback. This is incorrect, as the actual execution context of the asynchronous callback has nothing to do with the program arming its execution.

This set fixes this oversight, and supplies a few test cases ensuring the correct behavior is tested across different types of primitives (i.e. timer, wq, task_work). While looking at this bug, it was noticed that the GFP flag setting logic for storage_get helpers is also broken, hence fix it while we are at it.

PSA: These fixes and unit tests were primarily produced by prompting an AI assistant (Claude), and then modified in minor ways, in an exercise to understand how useful it can be at general kernel development tasks.

Changelog:
----------
v1 -> v2
v1: https://lore.kernel.org/bpf/[email protected]

* Squash fix for GFP flags into 1st commit. (Eduard)
* Add a commit refactoring func_atomic to non_sleepable, make it generic, also set for kfuncs in addition to helpers. (Eduard)
* Leave selftest as-is, coverage for global subprogs calling sleepable kfuncs or helpers is provided in rcu_read_lock.c.
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents: 56b4d16 + 5b1b5d3
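To illustrate the storage_get GFP issue described in the cover letter, here is a minimal sketch, not part of this commit (the map and program names are hypothetical, and it assumes the usual selftest includes such as vmlinux.h, bpf_helpers.h, bpf_tracing.h and bpf_experimental.h): even in a sleepable program, a bpf_*_storage_get() helper invoked inside an RCU read-side section runs in a non-sleepable context, so the verifier must patch the helper's hidden GFP argument to GFP_ATOMIC rather than GFP_KERNEL.

/* Hypothetical sketch, not part of this commit. */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} task_store SEC(".maps");

SEC("lsm.s/file_open")	/* sleepable program type */
int BPF_PROG(storage_get_in_rcu, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *val;

	bpf_rcu_read_lock();
	/* Non-sleepable region: any allocation here must be atomic,
	 * regardless of the program's own sleepable flag.
	 */
	val = bpf_task_storage_get(&task_store, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	bpf_rcu_read_unlock();
	return 0;
}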

File tree

4 files changed: +221, -18 lines

include/linux/bpf_verifier.h

Lines changed: 1 addition & 1 deletion
@@ -548,7 +548,7 @@ struct bpf_insn_aux_data {
 	bool nospec_result; /* result is unsafe under speculation, nospec must follow */
 	bool zext_dst; /* this insn zero extends dst reg */
 	bool needs_zext; /* alu op needs to clear upper bits */
-	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
+	bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
 	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
 	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
 	u8 alu_state; /* used in combination with alu_limit */

kernel/bpf/verifier.c

Lines changed: 37 additions & 17 deletions
@@ -515,6 +515,7 @@ static bool is_callback_calling_kfunc(u32 btf_id);
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
 
 static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
+static bool is_task_work_add_kfunc(u32 func_id);
 
 static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
 {
@@ -547,6 +548,21 @@ static bool is_async_callback_calling_insn(struct bpf_insn *insn)
 	       (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
 }
 
+static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn *insn)
+{
+	/* bpf_timer callbacks are never sleepable. */
+	if (bpf_helper_call(insn) && insn->imm == BPF_FUNC_timer_set_callback)
+		return false;
+
+	/* bpf_wq and bpf_task_work callbacks are always sleepable. */
+	if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
+	    (is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
+		return true;
+
+	verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
+	return false;
+}
+
 static bool is_may_goto_insn(struct bpf_insn *insn)
 {
 	return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
@@ -5826,8 +5842,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
 
 static bool in_sleepable(struct bpf_verifier_env *env)
 {
-	return env->prog->sleepable ||
-	       (env->cur_state && env->cur_state->in_sleepable);
+	return env->cur_state->in_sleepable;
 }
 
 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
@@ -10366,8 +10381,6 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
 				   struct bpf_func_state *callee,
 				   int insn_idx);
 
-static bool is_task_work_add_kfunc(u32 func_id);
-
 static int set_callee_state(struct bpf_verifier_env *env,
 			    struct bpf_func_state *caller,
 			    struct bpf_func_state *callee, int insn_idx);
@@ -10586,8 +10599,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
 		env->subprog_info[subprog].is_async_cb = true;
 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
 					 insn_idx, subprog,
-					 is_bpf_wq_set_callback_impl_kfunc(insn->imm) ||
-					 is_task_work_add_kfunc(insn->imm));
+					 is_async_cb_sleepable(env, insn));
 		if (!async_cb)
 			return -EFAULT;
 		callee = async_cb->frame[0];
@@ -11359,6 +11371,15 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
 	return *ptr && (*ptr)->func ? 0 : -EINVAL;
 }
 
+/* Check if we're in a sleepable context. */
+static inline bool in_sleepable_context(struct bpf_verifier_env *env)
+{
+	return !env->cur_state->active_rcu_lock &&
+	       !env->cur_state->active_preempt_locks &&
+	       !env->cur_state->active_irq_id &&
+	       in_sleepable(env);
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			     int *insn_idx_p)
 {
@@ -11425,9 +11446,6 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				func_id_name(func_id), func_id);
 			return -EINVAL;
 		}
-
-		if (in_sleepable(env) && is_storage_get_function(func_id))
-			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
 	}
 
 	if (env->cur_state->active_preempt_locks) {
@@ -11436,9 +11454,6 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				func_id_name(func_id), func_id);
 			return -EINVAL;
 		}
-
-		if (in_sleepable(env) && is_storage_get_function(func_id))
-			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
 	}
 
 	if (env->cur_state->active_irq_id) {
@@ -11447,11 +11462,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				func_id_name(func_id), func_id);
 			return -EINVAL;
 		}
-
-		if (in_sleepable(env) && is_storage_get_function(func_id))
-			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
 	}
 
+	/* Track non-sleepable context for helpers. */
+	if (!in_sleepable_context(env))
+		env->insn_aux_data[insn_idx].non_sleepable = true;
+
 	meta.func_id = func_id;
 	/* check args */
 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
@@ -13861,6 +13877,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EACCES;
 	}
 
+	/* Track non-sleepable context for kfuncs, same as for helpers. */
+	if (!in_sleepable_context(env))
+		insn_aux->non_sleepable = true;
+
 	/* Check the arguments */
 	err = check_kfunc_args(env, &meta, insn_idx);
 	if (err < 0)
@@ -22483,8 +22503,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		}
 
 		if (is_storage_get_function(insn->imm)) {
-			if (!in_sleepable(env) ||
-			    env->insn_aux_data[i + delta].storage_get_func_atomic)
+			if (env->insn_aux_data[i + delta].non_sleepable)
 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
 			else
 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
@@ -23154,6 +23173,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
 	state->curframe = 0;
 	state->speculative = false;
 	state->branches = 1;
+	state->in_sleepable = env->prog->sleepable;
 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT);
 	if (!state->frame[0]) {
 		kfree(state);

tools/testing/selftests/bpf/prog_tests/verifier.c

Lines changed: 2 additions & 0 deletions
@@ -7,6 +7,7 @@
 #include "verifier_arena.skel.h"
 #include "verifier_arena_large.skel.h"
 #include "verifier_array_access.skel.h"
+#include "verifier_async_cb_context.skel.h"
 #include "verifier_basic_stack.skel.h"
 #include "verifier_bitfield_write.skel.h"
 #include "verifier_bounds.skel.h"
@@ -280,6 +281,7 @@ void test_verifier_array_access(void)
 			   verifier_array_access__elf_bytes,
 			   init_array_access_maps);
 }
+void test_verifier_async_cb_context(void) { RUN(verifier_async_cb_context); }
 
 static int init_value_ptr_arith_maps(struct bpf_object *obj)
 {
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c

Lines changed: 181 additions & 0 deletions
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Timer tests */
+
+struct timer_elem {
+	struct bpf_timer t;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct timer_elem);
+} timer_map SEC(".maps");
+
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+	u32 data;
+	/* Timer callbacks are never sleepable, even from sleepable programs */
+	bpf_copy_from_user(&data, sizeof(data), NULL);
+	return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__failure __msg("helper call might sleep in a non-sleepable prog")
+int timer_non_sleepable_prog(void *ctx)
+{
+	struct timer_elem *val;
+	int key = 0;
+
+	val = bpf_map_lookup_elem(&timer_map, &key);
+	if (!val)
+		return 0;
+
+	bpf_timer_init(&val->t, &timer_map, 0);
+	bpf_timer_set_callback(&val->t, timer_cb);
+	return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("helper call might sleep in a non-sleepable prog")
+int timer_sleepable_prog(void *ctx)
+{
+	struct timer_elem *val;
+	int key = 0;
+
+	val = bpf_map_lookup_elem(&timer_map, &key);
+	if (!val)
+		return 0;
+
+	bpf_timer_init(&val->t, &timer_map, 0);
+	bpf_timer_set_callback(&val->t, timer_cb);
+	return 0;
+}
+
+/* Workqueue tests */
+
+struct wq_elem {
+	struct bpf_wq w;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct wq_elem);
+} wq_map SEC(".maps");
+
+static int wq_cb(void *map, int *key, void *value)
+{
+	u32 data;
+	/* Workqueue callbacks are always sleepable, even from non-sleepable programs */
+	bpf_copy_from_user(&data, sizeof(data), NULL);
+	return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__success
+int wq_non_sleepable_prog(void *ctx)
+{
+	struct wq_elem *val;
+	int key = 0;
+
+	val = bpf_map_lookup_elem(&wq_map, &key);
+	if (!val)
+		return 0;
+
+	if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
+		return 0;
+	if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
+		return 0;
+	return 0;
+}
+
+SEC("lsm.s/file_open")
+__success
+int wq_sleepable_prog(void *ctx)
+{
+	struct wq_elem *val;
+	int key = 0;
+
+	val = bpf_map_lookup_elem(&wq_map, &key);
+	if (!val)
+		return 0;
+
+	if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
+		return 0;
+	if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
+		return 0;
+	return 0;
+}
+
+/* Task work tests */
+
+struct task_work_elem {
+	struct bpf_task_work tw;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct task_work_elem);
+} task_work_map SEC(".maps");
+
+static int task_work_cb(struct bpf_map *map, void *key, void *value)
+{
+	u32 data;
+	/* Task work callbacks are always sleepable, even from non-sleepable programs */
+	bpf_copy_from_user(&data, sizeof(data), NULL);
+	return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__success
+int task_work_non_sleepable_prog(void *ctx)
+{
+	struct task_work_elem *val;
+	struct task_struct *task;
+	int key = 0;
+
+	val = bpf_map_lookup_elem(&task_work_map, &key);
+	if (!val)
+		return 0;
+
+	task = bpf_get_current_task_btf();
+	if (!task)
+		return 0;
+
+	bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb, NULL);
+	return 0;
+}
+
+SEC("lsm.s/file_open")
+__success
+int task_work_sleepable_prog(void *ctx)
+{
+	struct task_work_elem *val;
+	struct task_struct *task;
+	int key = 0;
+
+	val = bpf_map_lookup_elem(&task_work_map, &key);
+	if (!val)
+		return 0;
+
+	task = bpf_get_current_task_btf();
+	if (!task)
+		return 0;
+
+	bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb, NULL);
+	return 0;
+}
