Skip to content

Commit 469d638

Browse files
kkdwvd authored and Alexei Starovoitov committed
bpf: Fix sleepable context for async callbacks
Fix the BPF verifier to correctly determine the sleepable context of async callbacks based on the async primitive type rather than the arming program's context. The bug is in in_sleepable() which uses OR logic to check if the current execution context is sleepable. When a sleepable program arms a timer callback, the callback's state correctly has in_sleepable=false, but in_sleepable() would still return true due to env->prog->sleepable being true. This incorrectly allows sleepable helpers like bpf_copy_from_user() inside timer callbacks when armed from sleepable programs, even though timer callbacks always execute in non-sleepable context. Fix in_sleepable() to rely solely on env->cur_state->in_sleepable, and initialize state->in_sleepable to env->prog->sleepable in do_check_common() for the main program entry. This ensures the sleepable context is properly tracked per verification state rather than being overridden by the program's sleepability. The env->cur_state NULL check in in_sleepable() was only needed for do_misc_fixups() which runs after verification when env->cur_state is set to NULL. Update do_misc_fixups() to use env->prog->sleepable directly for the storage_get_function check, and remove the redundant NULL check from in_sleepable(). Introduce is_async_cb_sleepable() helper to explicitly determine async callback sleepability based on the primitive type: - bpf_timer callbacks are never sleepable - bpf_wq and bpf_task_work callbacks are always sleepable Add verifier_bug() check to catch unhandled async callback types, ensuring future additions cannot be silently mishandled. Move the is_task_work_add_kfunc() forward declaration to the top alongside other callback-related helpers. We update push_async_cb() to adjust to the new changes. At the same time, while simplifying in_sleepable(), we notice a problem in do_misc_fixups. 
Fix storage_get helpers to use GFP_ATOMIC when called from non-sleepable contexts within sleepable programs, such as bpf_timer callbacks. Currently, the check in do_misc_fixups assumes that env->prog->sleepable, previously in_sleepable(env) which only resolved to this check before last commit, holds across the program's execution, but that is not true. Instead, the func_atomic bit must be set whenever we see the function being called in an atomic context. Previously, this was done when the helper was invoked in atomic contexts in sleepable programs; now we can simply set the value to true without doing an in_sleepable() check. We must also do a standalone in_sleepable() check to handle cases where the async callback itself is armed from a sleepable program, but is itself non-sleepable (e.g., timer callback) and invokes such a helper, thus needing the func_atomic bit to be true for the said call. Adjust do_misc_fixups() to drop any checks regarding sleepable nature of the program, and just depend on the func_atomic bit to decide which GFP flag to pass. Fixes: 81f1d7a ("bpf: wq: add bpf_wq_set_callback_impl") Fixes: b00fa38 ("bpf: Enable non-atomic allocations in local storage") Acked-by: Eduard Zingerman <[email protected]> Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 56b4d16 commit 469d638

File tree

1 file changed

+30
-11
lines changed

1 file changed

+30
-11
lines changed

kernel/bpf/verifier.c

Lines changed: 30 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -515,6 +515,7 @@ static bool is_callback_calling_kfunc(u32 btf_id);
515515
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
516516

517517
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
518+
static bool is_task_work_add_kfunc(u32 func_id);
518519

519520
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
520521
{
@@ -547,6 +548,21 @@ static bool is_async_callback_calling_insn(struct bpf_insn *insn)
547548
(bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
548549
}
549550

551+
static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn *insn)
552+
{
553+
/* bpf_timer callbacks are never sleepable. */
554+
if (bpf_helper_call(insn) && insn->imm == BPF_FUNC_timer_set_callback)
555+
return false;
556+
557+
/* bpf_wq and bpf_task_work callbacks are always sleepable. */
558+
if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
559+
(is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
560+
return true;
561+
562+
verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
563+
return false;
564+
}
565+
550566
static bool is_may_goto_insn(struct bpf_insn *insn)
551567
{
552568
return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
@@ -5826,8 +5842,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
58265842

58275843
static bool in_sleepable(struct bpf_verifier_env *env)
58285844
{
5829-
return env->prog->sleepable ||
5830-
(env->cur_state && env->cur_state->in_sleepable);
5845+
return env->cur_state->in_sleepable;
58315846
}
58325847

58335848
/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
@@ -10366,8 +10381,6 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
1036610381
struct bpf_func_state *callee,
1036710382
int insn_idx);
1036810383

10369-
static bool is_task_work_add_kfunc(u32 func_id);
10370-
1037110384
static int set_callee_state(struct bpf_verifier_env *env,
1037210385
struct bpf_func_state *caller,
1037310386
struct bpf_func_state *callee, int insn_idx);
@@ -10586,8 +10599,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
1058610599
env->subprog_info[subprog].is_async_cb = true;
1058710600
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
1058810601
insn_idx, subprog,
10589-
is_bpf_wq_set_callback_impl_kfunc(insn->imm) ||
10590-
is_task_work_add_kfunc(insn->imm));
10602+
is_async_cb_sleepable(env, insn));
1059110603
if (!async_cb)
1059210604
return -EFAULT;
1059310605
callee = async_cb->frame[0];
@@ -11426,7 +11438,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
1142611438
return -EINVAL;
1142711439
}
1142811440

11429-
if (in_sleepable(env) && is_storage_get_function(func_id))
11441+
if (is_storage_get_function(func_id))
1143011442
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
1143111443
}
1143211444

@@ -11437,7 +11449,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
1143711449
return -EINVAL;
1143811450
}
1143911451

11440-
if (in_sleepable(env) && is_storage_get_function(func_id))
11452+
if (is_storage_get_function(func_id))
1144111453
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
1144211454
}
1144311455

@@ -11448,10 +11460,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
1144811460
return -EINVAL;
1144911461
}
1145011462

11451-
if (in_sleepable(env) && is_storage_get_function(func_id))
11463+
if (is_storage_get_function(func_id))
1145211464
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
1145311465
}
1145411466

11467+
/*
11468+
* Non-sleepable contexts in sleepable programs (e.g., timer callbacks)
11469+
* are atomic and must use GFP_ATOMIC for storage_get helpers.
11470+
*/
11471+
if (!in_sleepable(env) && is_storage_get_function(func_id))
11472+
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
11473+
1145511474
meta.func_id = func_id;
1145611475
/* check args */
1145711476
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
@@ -22483,8 +22502,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2248322502
}
2248422503

2248522504
if (is_storage_get_function(insn->imm)) {
22486-
if (!in_sleepable(env) ||
22487-
env->insn_aux_data[i + delta].storage_get_func_atomic)
22505+
if (env->insn_aux_data[i + delta].storage_get_func_atomic)
2248822506
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
2248922507
else
2249022508
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
@@ -23154,6 +23172,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
2315423172
state->curframe = 0;
2315523173
state->speculative = false;
2315623174
state->branches = 1;
23175+
state->in_sleepable = env->prog->sleepable;
2315723176
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT);
2315823177
if (!state->frame[0]) {
2315923178
kfree(state);

0 commit comments

Comments
 (0)