Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 30 additions & 11 deletions kernel/bpf/verifier.c
Original file line number Diff line number Diff line change
Expand Up @@ -515,6 +515,7 @@ static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);

static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
static bool is_task_work_add_kfunc(u32 func_id);

static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{
Expand Down Expand Up @@ -547,6 +548,21 @@ static bool is_async_callback_calling_insn(struct bpf_insn *insn)
(bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
}

/* Decide whether the async callback registered by @insn runs in a
 * sleepable context, independent of the registering program's own
 * sleepability.  Any call site that registers an async callback but is
 * not handled here is a verifier bug.
 */
static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	bool is_helper = bpf_helper_call(insn);

	/* bpf_timer callbacks are never sleepable. */
	if (is_helper && insn->imm == BPF_FUNC_timer_set_callback)
		return false;

	/* bpf_wq and bpf_task_work callbacks are always sleepable. */
	if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
	    (is_task_work_add_kfunc(insn->imm) ||
	     is_bpf_wq_set_callback_impl_kfunc(insn->imm)))
		return true;

	verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
	return false;
}

static bool is_may_goto_insn(struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
Expand Down Expand Up @@ -5826,8 +5842,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,

/* Report whether the verifier state currently being explored may sleep.
 *
 * The per-state flag is authoritative: it is seeded from
 * env->prog->sleepable in do_check_common() and adjusted when pushing
 * async callback states, so checking env->prog->sleepable here again
 * would be redundant (and wrong for e.g. non-sleepable timer callbacks
 * inside sleepable programs).
 */
static bool in_sleepable(struct bpf_verifier_env *env)
{
	return env->cur_state->in_sleepable;
}

/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
Expand Down Expand Up @@ -10366,8 +10381,6 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
struct bpf_func_state *callee,
int insn_idx);

static bool is_task_work_add_kfunc(u32 func_id);

static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx);
Expand Down Expand Up @@ -10586,8 +10599,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
env->subprog_info[subprog].is_async_cb = true;
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
insn_idx, subprog,
is_bpf_wq_set_callback_impl_kfunc(insn->imm) ||
is_task_work_add_kfunc(insn->imm));
is_async_cb_sleepable(env, insn));
if (!async_cb)
return -EFAULT;
callee = async_cb->frame[0];
Expand Down Expand Up @@ -11426,7 +11438,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}

if (in_sleepable(env) && is_storage_get_function(func_id))
if (is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}

Expand All @@ -11437,7 +11449,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}

if (in_sleepable(env) && is_storage_get_function(func_id))
if (is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}

Expand All @@ -11448,10 +11460,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}

if (in_sleepable(env) && is_storage_get_function(func_id))
if (is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}

/*
* Non-sleepable contexts in sleepable programs (e.g., timer callbacks)
* are atomic and must use GFP_ATOMIC for storage_get helpers.
*/
if (!in_sleepable(env) && is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;

meta.func_id = func_id;
/* check args */
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
Expand Down Expand Up @@ -22483,8 +22502,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
}

if (is_storage_get_function(insn->imm)) {
if (!in_sleepable(env) ||
env->insn_aux_data[i + delta].storage_get_func_atomic)
if (env->insn_aux_data[i + delta].storage_get_func_atomic)
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
else
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
Expand Down Expand Up @@ -23154,6 +23172,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
state->curframe = 0;
state->speculative = false;
state->branches = 1;
state->in_sleepable = env->prog->sleepable;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT);
if (!state->frame[0]) {
kfree(state);
Expand Down
2 changes: 2 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/verifier.c
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
#include "verifier_arena.skel.h"
#include "verifier_arena_large.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_async_cb_context.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bitfield_write.skel.h"
#include "verifier_bounds.skel.h"
Expand Down Expand Up @@ -280,6 +281,7 @@ void test_verifier_array_access(void)
verifier_array_access__elf_bytes,
init_array_access_maps);
}
/* Runner hook: loads verifier_async_cb_context.c and checks each program's
 * expected verifier verdict (__success / __failure annotations).
 */
void test_verifier_async_cb_context(void) { RUN(verifier_async_cb_context); }

static int init_value_ptr_arith_maps(struct bpf_object *obj)
{
Expand Down
181 changes: 181 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

/* Timer tests */

/* Map value carrying an embedded bpf_timer object. */
struct timer_elem {
	struct bpf_timer t;
};

/* Single-slot array map backing the timer tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct timer_elem);
} timer_map SEC(".maps");

/* Callback attached via bpf_timer_set_callback(). It deliberately calls
 * the sleepable helper bpf_copy_from_user() so that the verifier must
 * reject any program arming it: timer callbacks run in a non-sleepable
 * context.
 */
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
	u32 data;
	/* Timer callbacks are never sleepable, even when armed from a
	 * sleepable program, so this sleepable helper call must be rejected.
	 */
	bpf_copy_from_user(&data, sizeof(data), NULL);
	return 0;
}

/* Arm a timer from a non-sleepable (fentry) program: the callback is
 * verified as non-sleepable, so the sleepable helper inside timer_cb
 * must fail verification with the message asserted below.
 */
SEC("fentry/bpf_fentry_test1")
__failure __msg("helper call might sleep in a non-sleepable prog")
int timer_non_sleepable_prog(void *ctx)
{
	struct timer_elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&timer_map, &key);
	if (!val)
		return 0;

	bpf_timer_init(&val->t, &timer_map, 0);
	bpf_timer_set_callback(&val->t, timer_cb);
	return 0;
}

/* Arm a timer from a sleepable (lsm.s) program: the callback must still
 * be verified as non-sleepable — sleepability of the registering program
 * does not carry over to the timer callback — so verification must fail.
 */
SEC("lsm.s/file_open")
__failure __msg("helper call might sleep in a non-sleepable prog")
int timer_sleepable_prog(void *ctx)
{
	struct timer_elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&timer_map, &key);
	if (!val)
		return 0;

	bpf_timer_init(&val->t, &timer_map, 0);
	bpf_timer_set_callback(&val->t, timer_cb);
	return 0;
}

/* Workqueue tests */

/* Map value carrying an embedded bpf_wq object. */
struct wq_elem {
	struct bpf_wq w;
};

/* Single-slot array map backing the workqueue tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct wq_elem);
} wq_map SEC(".maps");

/* Callback attached via bpf_wq_set_callback_impl(). It calls the
 * sleepable helper bpf_copy_from_user(), which must be accepted:
 * workqueue callbacks are verified as sleepable.
 */
static int wq_cb(void *map, int *key, void *value)
{
	u32 data;
	/* Workqueue callbacks are always sleepable, even when registered
	 * from non-sleepable programs, so this call must pass verification.
	 */
	bpf_copy_from_user(&data, sizeof(data), NULL);
	return 0;
}

/* Register a workqueue callback from a non-sleepable (fentry) program:
 * the callback is still verified as sleepable, so the sleepable helper
 * inside wq_cb must be accepted.
 */
SEC("fentry/bpf_fentry_test1")
__success
int wq_non_sleepable_prog(void *ctx)
{
	struct wq_elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&wq_map, &key);
	if (!val)
		return 0;

	if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
		return 0;
	if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
		return 0;
	return 0;
}

/* Register a workqueue callback from a sleepable (lsm.s) program:
 * callback remains sleepable, so verification must succeed.
 */
SEC("lsm.s/file_open")
__success
int wq_sleepable_prog(void *ctx)
{
	struct wq_elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&wq_map, &key);
	if (!val)
		return 0;

	if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
		return 0;
	if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
		return 0;
	return 0;
}

/* Task work tests */

/* Map value carrying an embedded bpf_task_work object. */
struct task_work_elem {
	struct bpf_task_work tw;
};

/* Single-slot array map backing the task_work tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct task_work_elem);
} task_work_map SEC(".maps");

/* Callback scheduled via bpf_task_work_schedule_resume(). It calls the
 * sleepable helper bpf_copy_from_user(), which must be accepted: task
 * work callbacks are verified as sleepable.
 */
static int task_work_cb(struct bpf_map *map, void *key, void *value)
{
	u32 data;
	/* Task work callbacks are always sleepable, even when scheduled
	 * from non-sleepable programs, so this call must pass verification.
	 */
	bpf_copy_from_user(&data, sizeof(data), NULL);
	return 0;
}

/* Schedule task work from a non-sleepable (fentry) program: the callback
 * is still verified as sleepable, so the sleepable helper inside
 * task_work_cb must be accepted.
 */
SEC("fentry/bpf_fentry_test1")
__success
int task_work_non_sleepable_prog(void *ctx)
{
	struct task_work_elem *val;
	struct task_struct *task;
	int key = 0;

	val = bpf_map_lookup_elem(&task_work_map, &key);
	if (!val)
		return 0;

	task = bpf_get_current_task_btf();
	if (!task)
		return 0;

	bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb, NULL);
	return 0;
}

/* Schedule task work from a sleepable (lsm.s) program: callback remains
 * sleepable, so verification must succeed.
 */
SEC("lsm.s/file_open")
__success
int task_work_sleepable_prog(void *ctx)
{
	struct task_work_elem *val;
	struct task_struct *task;
	int key = 0;

	val = bpf_map_lookup_elem(&task_work_map, &key);
	if (!val)
		return 0;

	task = bpf_get_current_task_btf();
	if (!task)
		return 0;

	bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb, NULL);
	return 0;
}
Loading