Commit aa12780

bpf: task work scheduling kfuncs
Implementation of the bpf_task_work_schedule kfuncs.

Main components:
 * struct bpf_task_work_context – Metadata and state management per task work.
 * enum bpf_task_work_state – A state machine to serialize work scheduling and execution.
 * bpf_task_work_schedule() – The central helper that initiates scheduling.
 * bpf_task_work_callback() – Invoked when the actual task_work runs.
 * bpf_task_work_irq() – An intermediate step (runs in softirq context) to enqueue task work.
 * bpf_task_work_cancel_and_free() – Cleanup for deleted BPF map entries.

Flow of task work scheduling:
 1) bpf_task_work_schedule_*() is called from BPF code.
 2) The state transitions from STANDBY to PENDING.
 3) irq_work_queue() schedules bpf_task_work_irq().
 4) The state transitions from PENDING to SCHEDULING.
 5) bpf_task_work_irq() attempts task_work_add(). If successful, the state transitions to SCHEDULED.
 6) Task work calls bpf_task_work_callback(), which transitions the state to RUNNING.
 7) The BPF callback is executed.
 8) The context is cleaned up, refcounts are released, and the state is set back to STANDBY.

Map value deletion

If a map value that contains a bpf_task_work_context is deleted, the BPF map
implementation calls bpf_task_work_cancel_and_free(). Deletion is handled by
atomically setting the state to FREED and either releasing references there or
letting the scheduling code do so, depending on the last state before the deletion:
 * SCHEDULING: release references in bpf_task_work_cancel_and_free(); expect
   bpf_task_work_irq() to cancel the task work.
 * SCHEDULED: release references and try to cancel the task work in
   bpf_task_work_cancel_and_free().
 * other states: one of bpf_task_work_irq(), bpf_task_work_schedule(), or
   bpf_task_work_callback() cleans up upon detecting the state switch to FREED.

The state transitions are controlled with atomic_cmpxchg(), ensuring:
 * only one thread can successfully enqueue work;
 * proper handling of concurrent deletes (BPF_TW_FREED);
 * safe rollback if task_work_add() fails.

Signed-off-by: Mykyta Yatsenko <[email protected]>
1 parent 30e54c7 commit aa12780
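
For illustration only, not part of this patch: below is a minimal, self-contained userspace sketch of the cmpxchg-driven state machine described in the commit message, using C11 atomics in place of the kernel's atomic_cmpxchg_*() helpers and collapsing the asynchronous irq_work/task_work steps into a single call path. All names in the sketch are hypothetical.

/* Illustrative userspace model of the bpf_task_work state machine. */
#include <stdatomic.h>
#include <stdio.h>

enum tw_state { TW_STANDBY, TW_PENDING, TW_SCHEDULING, TW_SCHEDULED, TW_RUNNING, TW_FREED };

static _Atomic int state = TW_STANDBY;

/* Try to move from @old to @next; return the state actually observed. */
static int transition(int old, int next)
{
        int expected = old;

        atomic_compare_exchange_strong(&state, &expected, next);
        return expected; /* equals @old on success, the racing value otherwise */
}

static void schedule_work(void)
{
        /* Only one scheduler wins STANDBY -> PENDING; everyone else gets "busy". */
        if (transition(TW_STANDBY, TW_PENDING) != TW_STANDBY) {
                puts("-EBUSY: work already scheduled");
                return;
        }
        /* irq_work step: PENDING -> SCHEDULING; a concurrent delete flips the state to FREED. */
        if (transition(TW_PENDING, TW_SCHEDULING) == TW_FREED)
                return;
        /* task_work_add() succeeded: SCHEDULING -> SCHEDULED. */
        if (transition(TW_SCHEDULING, TW_SCHEDULED) == TW_FREED)
                return;
        /* Callback entry: SCHEDULED -> RUNNING, then back to STANDBY when done. */
        if (transition(TW_SCHEDULED, TW_RUNNING) != TW_FREED)
                transition(TW_RUNNING, TW_STANDBY);
}

int main(void)
{
        schedule_work();        /* completes and returns the slot to STANDBY */
        schedule_work();        /* would report -EBUSY if the first were still in flight */
        return 0;
}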

File tree

1 file changed: +186 −2 lines changed

kernel/bpf/helpers.c

Lines changed: 186 additions & 2 deletions
@@ -25,6 +25,8 @@
 #include <linux/kasan.h>
 #include <linux/bpf_verifier.h>
 #include <linux/uaccess.h>
+#include <linux/task_work.h>
+#include <linux/irq_work.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -3702,6 +3704,160 @@ __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign)
 
 typedef void (*bpf_task_work_callback_t)(struct bpf_map *, void *, void *);
 
+enum bpf_task_work_state {
+        /* bpf_task_work is ready to be used */
+        BPF_TW_STANDBY = 0,
+        /* bpf_task_work is getting scheduled into irq_work */
+        BPF_TW_PENDING,
+        /* bpf_task_work is in irq_work and getting scheduled into task_work */
+        BPF_TW_SCHEDULING,
+        /* bpf_task_work is scheduled into task_work successfully */
+        BPF_TW_SCHEDULED,
+        /* callback is running */
+        BPF_TW_RUNNING,
+        /* BPF map value storing this bpf_task_work is deleted */
+        BPF_TW_FREED,
+};
+
+struct bpf_task_work_context {
+        /* map that contains this structure in a value */
+        struct bpf_map *map;
+        /* bpf_task_work_state value, representing the state */
+        atomic_t state;
+        /* bpf_prog that schedules task work */
+        struct bpf_prog *prog;
+        /* task for which callback is scheduled */
+        struct task_struct *task;
+        /* notification mode for task work scheduling */
+        enum task_work_notify_mode mode;
+        /* callback to call from task work */
+        bpf_task_work_callback_t callback_fn;
+        struct callback_head work;
+        struct irq_work irq_work;
+} __aligned(8);
+
+static bool task_work_match(struct callback_head *head, void *data)
+{
+        struct bpf_task_work_context *ctx = container_of(head, struct bpf_task_work_context, work);
+
+        return ctx == data;
+}
+
+static void bpf_reset_task_work_context(struct bpf_task_work_context *ctx)
+{
+        bpf_prog_put(ctx->prog);
+        bpf_task_release(ctx->task);
+        rcu_assign_pointer(ctx->map, NULL);
+}
+
+static void bpf_task_work_callback(struct callback_head *cb)
+{
+        enum bpf_task_work_state state;
+        struct bpf_task_work_context *ctx;
+        struct bpf_map *map;
+        u32 idx;
+        void *key;
+        void *value;
+
+        rcu_read_lock_trace();
+        ctx = container_of(cb, struct bpf_task_work_context, work);
+
+        state = atomic_cmpxchg_acquire(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING);
+        if (state == BPF_TW_SCHEDULED)
+                state = atomic_cmpxchg_acquire(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING);
+        if (state == BPF_TW_FREED)
+                goto out;
+
+        map = rcu_dereference(ctx->map);
+        if (!map)
+                goto out;
+
+        value = (void *)ctx - map->record->task_work_off;
+        key = (void *)map_key_from_value(map, value, &idx);
+
+        migrate_disable();
+        ctx->callback_fn(map, key, value);
+        migrate_enable();
+
+        /* State is running or freed, either way reset. */
+        bpf_reset_task_work_context(ctx);
+        atomic_cmpxchg_release(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY);
+out:
+        rcu_read_unlock_trace();
+}
+
+static void bpf_task_work_irq(struct irq_work *irq_work)
+{
+        struct bpf_task_work_context *ctx;
+        enum bpf_task_work_state state;
+        int err;
+
+        ctx = container_of(irq_work, struct bpf_task_work_context, irq_work);
+
+        rcu_read_lock_trace();
+        state = atomic_cmpxchg_release(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING);
+        if (state == BPF_TW_FREED) {
+                bpf_reset_task_work_context(ctx);
+                goto out;
+        }
+
+        err = task_work_add(ctx->task, &ctx->work, ctx->mode);
+        if (err) {
+                state = atomic_cmpxchg_acquire(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_PENDING);
+                if (state == BPF_TW_SCHEDULING) {
+                        bpf_reset_task_work_context(ctx);
+                        atomic_cmpxchg_release(&ctx->state, BPF_TW_PENDING, BPF_TW_STANDBY);
+                }
+                goto out;
+        }
+        state = atomic_cmpxchg_release(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
+        if (state == BPF_TW_FREED)
+                task_work_cancel_match(ctx->task, task_work_match, ctx);
+out:
+        rcu_read_unlock_trace();
+}
+
+static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work_context *ctx,
+                                  struct bpf_map *map, bpf_task_work_callback_t callback_fn,
+                                  struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
+{
+        struct bpf_prog *prog;
+
+        BTF_TYPE_EMIT(struct bpf_task_work);
+
+        prog = bpf_prog_inc_not_zero(aux->prog);
+        if (IS_ERR(prog))
+                return -EPERM;
+
+        if (!atomic64_read(&map->usercnt)) {
+                bpf_prog_put(prog);
+                return -EPERM;
+        }
+        task = bpf_task_acquire(task);
+        if (!task) {
+                bpf_prog_put(prog);
+                return -EPERM;
+        }
+
+        if (atomic_cmpxchg_acquire(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
+                bpf_task_release(task);
+                bpf_prog_put(prog);
+                return -EBUSY;
+        }
+
+        ctx->task = task;
+        ctx->callback_fn = callback_fn;
+        ctx->prog = prog;
+        ctx->mode = mode;
+        init_irq_work(&ctx->irq_work, bpf_task_work_irq);
+        init_task_work(&ctx->work, bpf_task_work_callback);
+        rcu_assign_pointer(ctx->map, map);
+
+        irq_work_queue(&ctx->irq_work);
+
+        return 0;
+}
+
 /**
  * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
  * @task: Task struct for which callback should be scheduled
@@ -3718,7 +3874,8 @@ __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task,
                                               bpf_task_work_callback_t callback,
                                               void *aux__prog)
 {
-        return 0;
+        return bpf_task_work_schedule(task, (struct bpf_task_work_context *)tw, map__map,
+                                      callback, aux__prog, TWA_SIGNAL);
 }
 
 /**
@@ -3738,13 +3895,38 @@ __bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task,
                                               bpf_task_work_callback_t callback,
                                               void *aux__prog)
 {
-        return 0;
+        enum task_work_notify_mode mode;
+
+        mode = task == current && in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
+        return bpf_task_work_schedule(task, (struct bpf_task_work_context *)tw, map__map,
+                                      callback, aux__prog, mode);
 }
 
 __bpf_kfunc_end_defs();
 
 void bpf_task_work_cancel_and_free(void *val)
 {
+        struct bpf_task_work_context *ctx = val;
+        enum bpf_task_work_state state;
+
+        state = atomic_xchg(&ctx->state, BPF_TW_FREED);
+        switch (state) {
+        case BPF_TW_SCHEDULED:
+                task_work_cancel_match(ctx->task, task_work_match, ctx);
+                fallthrough;
+        /* Scheduling codepath is trying to schedule task work, reset context here. */
+        case BPF_TW_SCHEDULING:
+                bpf_reset_task_work_context(ctx);
+                break;
+        /* work is not initialized, mark as freed and exit */
+        case BPF_TW_STANDBY:
+        /* The context is in interim state, scheduling logic should cleanup. */
+        case BPF_TW_PENDING:
+        /* Callback is already running, it should reset context upon finishing. */
+        case BPF_TW_RUNNING:
+        default:
+                break;
+        }
 }
 
 BTF_KFUNCS_START(generic_btf_ids)
@@ -3770,6 +3952,8 @@ BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
 
 #ifdef CONFIG_CGROUPS
 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
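
A sketch of how a BPF program might call the new kfuncs, not part of this patch: the caller-side prototype below is an assumption reconstructed from the kernel-side definitions above, so the exact tw/map__map parameter types, the aux__prog handling, and the required verifier support may differ once the series is final. The map, callback, and program names are illustrative, and struct bpf_task_work is assumed to be visible via vmlinux.h as BTF_TYPE_EMIT() suggests.

/* Hypothetical BPF-side usage sketch; all names and the kfunc prototype are assumptions. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct elem {
        struct bpf_task_work tw;
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
} tw_map SEC(".maps");

/* Assumed kfunc prototype, mirroring the kernel-side argument names in this commit. */
extern int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
                                         void *map__map,
                                         void (*callback)(struct bpf_map *map, void *key, void *value),
                                         void *aux__prog) __ksym;

/* Runs later in task context, after task_work delivers it with TWA_SIGNAL notification. */
static void process_in_task(struct bpf_map *map, void *key, void *value)
{
}

SEC("tp_btf/task_newtask")
int BPF_PROG(schedule_on_new_task, struct task_struct *task, u64 clone_flags)
{
        int key = 0;
        struct elem *v;

        v = bpf_map_lookup_elem(&tw_map, &key);
        if (!v)
                return 0;

        /* Expected to return -EBUSY if this map slot already has work in flight. */
        bpf_task_work_schedule_signal(task, &v->tw, &tw_map, process_in_task, NULL);
        return 0;
}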
