@@ -1092,9 +1092,14 @@ static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
 	return (void *)value - round_up(map->key_size, 8);
 }
 
+struct bpf_async_res {
+	struct bpf_prog *prog;
+	struct bpf_map *st_ops_assoc;
+};
+
 struct bpf_async_cb {
 	struct bpf_map *map;
-	struct bpf_prog *prog;
+	struct bpf_async_res res;
 	void __rcu *callback_fn;
 	void *value;
 	union {
@@ -1299,8 +1304,8 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
 		break;
 	}
 	cb->map = map;
-	cb->prog = NULL;
 	cb->flags = flags;
+	memset(&cb->res, 0, sizeof(cb->res));
 	rcu_assign_pointer(cb->callback_fn, NULL);
 
 	WRITE_ONCE(async->cb, cb);
@@ -1351,11 +1356,47 @@ static const struct bpf_func_proto bpf_timer_init_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+static void bpf_async_res_put(struct bpf_async_res *res)
+{
+	bpf_prog_put(res->prog);
+
+	if (res->st_ops_assoc)
+		bpf_map_put(res->st_ops_assoc);
+}
+
+static int bpf_async_res_get(struct bpf_async_res *res, struct bpf_prog *prog)
+{
+	struct bpf_map *st_ops_assoc = NULL;
+	int err;
+
+	prog = bpf_prog_inc_not_zero(prog);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	st_ops_assoc = READ_ONCE(prog->aux->st_ops_assoc);
+	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS &&
+	    st_ops_assoc && st_ops_assoc != BPF_PTR_POISON) {
+		st_ops_assoc = bpf_map_inc_not_zero(st_ops_assoc);
+		if (IS_ERR(st_ops_assoc)) {
+			err = PTR_ERR(st_ops_assoc);
+			goto put_prog;
+		}
+	}
+
+	res->prog = prog;
+	res->st_ops_assoc = st_ops_assoc;
+	return 0;
+put_prog:
+	bpf_prog_put(prog);
+	return err;
+}
+
 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
 				    struct bpf_prog_aux *aux, unsigned int flags,
 				    enum bpf_async_type type)
 {
 	struct bpf_prog *prev, *prog = aux->prog;
+	struct bpf_async_res res;
 	struct bpf_async_cb *cb;
 	int ret = 0;
 
@@ -1376,20 +1417,18 @@ static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback
 		ret = -EPERM;
 		goto out;
 	}
-	prev = cb->prog;
+	prev = cb->res.prog;
 	if (prev != prog) {
-		/* Bump prog refcnt once. Every bpf_timer_set_callback()
+		/* Get prog and related resources once. Every bpf_timer_set_callback()
 		 * can pick different callback_fn-s within the same prog.
 		 */
-		prog = bpf_prog_inc_not_zero(prog);
-		if (IS_ERR(prog)) {
-			ret = PTR_ERR(prog);
+		ret = bpf_async_res_get(&res, prog);
+		if (ret)
 			goto out;
-		}
 		if (prev)
-			/* Drop prev prog refcnt when swapping with new prog */
-			bpf_prog_put(prev);
-		cb->prog = prog;
+			/* Put prev prog and related resources when swapping with new prog */
+			bpf_async_res_put(&cb->res);
+		cb->res = res;
 	}
 	rcu_assign_pointer(cb->callback_fn, callback_fn);
 out:
@@ -1423,7 +1462,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, fla
 		return -EINVAL;
 	__bpf_spin_lock_irqsave(&timer->lock);
 	t = timer->timer;
-	if (!t || !t->cb.prog) {
+	if (!t || !t->cb.res.prog) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1451,14 +1490,14 @@ static const struct bpf_func_proto bpf_timer_start_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-static void drop_prog_refcnt(struct bpf_async_cb *async)
+static void bpf_async_cb_reset(struct bpf_async_cb *cb)
 {
-	struct bpf_prog *prog = async->prog;
+	struct bpf_prog *prog = cb->res.prog;
 
 	if (prog) {
-		bpf_prog_put(prog);
-		async->prog = NULL;
-		rcu_assign_pointer(async->callback_fn, NULL);
+		bpf_async_res_put(&cb->res);
+		memset(&cb->res, 0, sizeof(cb->res));
+		rcu_assign_pointer(cb->callback_fn, NULL);
 	}
 }
14641503
@@ -1512,7 +1551,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
 		goto out;
 	}
 drop:
-	drop_prog_refcnt(&t->cb);
+	bpf_async_cb_reset(&t->cb);
 out:
 	__bpf_spin_unlock_irqrestore(&timer->lock);
 	/* Cancel the timer and wait for associated callback to finish
@@ -1545,7 +1584,7 @@ static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *a
 	cb = async->cb;
 	if (!cb)
 		goto out;
-	drop_prog_refcnt(cb);
+	bpf_async_cb_reset(cb);
 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
 	 * this timer, since it won't be initialized.
 	 */
@@ -3112,7 +3151,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
 	if (flags)
 		return -EINVAL;
 	w = READ_ONCE(async->work);
-	if (!w || !READ_ONCE(w->cb.prog))
+	if (!w || !READ_ONCE(w->cb.res.prog))
 		return -EINVAL;
 
 	schedule_work(&w->work);
@@ -4034,8 +4073,8 @@ struct bpf_task_work_ctx {
 	refcount_t refcnt;
 	struct callback_head work;
 	struct irq_work irq_work;
-	/* bpf_prog that schedules task work */
-	struct bpf_prog *prog;
+	/* bpf_prog that schedules task work and related resources */
+	struct bpf_async_res res;
 	/* task for which callback is scheduled */
 	struct task_struct *task;
 	/* the map and map value associated with this context */
@@ -4053,9 +4092,9 @@ struct bpf_task_work_kern {
 
 static void bpf_task_work_ctx_reset(struct bpf_task_work_ctx *ctx)
 {
-	if (ctx->prog) {
-		bpf_prog_put(ctx->prog);
-		ctx->prog = NULL;
+	if (ctx->res.prog) {
+		bpf_async_res_put(&ctx->res);
+		memset(&ctx->res, 0, sizeof(ctx->res));
 	}
 	if (ctx->task) {
 		bpf_task_release(ctx->task);
@@ -4233,19 +4272,19 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work
 				  struct bpf_map *map, bpf_task_work_callback_t callback_fn,
 				  struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
 {
-	struct bpf_prog *prog;
 	struct bpf_task_work_ctx *ctx;
+	struct bpf_async_res res;
 	int err;
 
 	BTF_TYPE_EMIT(struct bpf_task_work);
 
-	prog = bpf_prog_inc_not_zero(aux->prog);
-	if (IS_ERR(prog))
-		return -EBADF;
+	err = bpf_async_res_get(&res, aux->prog);
+	if (err)
+		return err;
 	task = bpf_task_acquire(task);
 	if (!task) {
 		err = -EBADF;
-		goto release_prog;
+		goto release_res;
 	}
 
 	ctx = bpf_task_work_acquire_ctx(tw, map);
@@ -4256,7 +4295,7 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work
 
 	ctx->task = task;
 	ctx->callback_fn = callback_fn;
-	ctx->prog = prog;
+	ctx->res = res;
 	ctx->mode = mode;
 	ctx->map = map;
 	ctx->map_val = (void *)tw - map->record->task_work_off;
@@ -4268,8 +4307,8 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work
 
 release_all:
 	bpf_task_release(task);
-release_prog:
-	bpf_prog_put(prog);
+release_res:
+	bpf_async_res_put(&res);
 	return err;
 }
 
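Note on the refcounting pattern introduced above: bpf_async_res_get()/bpf_async_res_put() bundle the BPF program reference and the optional struct_ops map reference so that every async user (timers, workqueues, task_work) acquires and releases them as a unit. The standalone C sketch below models only that pairing and the swap performed in __bpf_async_set_callback(); the toy res_obj type, obj_get()/obj_put(), and the simplified async_res_get()/async_res_put() helpers are illustrative stand-ins for bpf_prog_inc_not_zero()/bpf_prog_put() and bpf_map_inc_not_zero()/bpf_map_put(), not kernel APIs, and error handling and locking are omitted.

/*
 * Standalone model of the get/put/swap pattern from this patch.
 * Toy types and plain integer refcounts stand in for struct bpf_prog and
 * the associated struct_ops map; only the pairing and ordering of the
 * acquire/release operations mirrors the kernel change.
 */
#include <stdio.h>

struct res_obj {		/* stand-in for a bpf_prog or a struct_ops map */
	const char *name;
	int refcnt;
};

struct async_res {		/* mirrors struct bpf_async_res */
	struct res_obj *prog;
	struct res_obj *st_ops_assoc;	/* optional, may be NULL */
};

static void obj_get(struct res_obj *obj)
{
	obj->refcnt++;
}

static void obj_put(struct res_obj *obj)
{
	obj->refcnt--;
}

/* Take references on everything the async callback will need, as one unit. */
static int async_res_get(struct async_res *res, struct res_obj *prog,
			 struct res_obj *st_ops_assoc)
{
	obj_get(prog);
	if (st_ops_assoc)
		obj_get(st_ops_assoc);
	res->prog = prog;
	res->st_ops_assoc = st_ops_assoc;
	return 0;	/* failure paths of the *_inc_not_zero() calls omitted */
}

/* Drop both references together, like bpf_async_res_put() in the patch. */
static void async_res_put(struct async_res *res)
{
	obj_put(res->prog);
	if (res->st_ops_assoc)
		obj_put(res->st_ops_assoc);
}

int main(void)
{
	struct res_obj prog_a = { "prog_a", 1 }, prog_b = { "prog_b", 1 };
	struct async_res cur = { NULL, NULL }, next;

	/* First set_callback: take the bundle for prog_a, nothing to swap out. */
	async_res_get(&next, &prog_a, NULL);
	cur = next;

	/* set_callback with a different prog: get new bundle, put old one, swap. */
	async_res_get(&next, &prog_b, NULL);
	async_res_put(&cur);
	cur = next;

	/* Timer cancelled / map freed: drop whatever is currently held. */
	async_res_put(&cur);

	/* Both objects are back at their original refcount of 1. */
	printf("prog_a refcnt=%d, prog_b refcnt=%d\n", prog_a.refcnt, prog_b.refcnt);
	return 0;
}

The model preserves the ordering used in the patch: the new bundle is acquired before the old one is released and the stored res is only overwritten afterwards, which in the kernel code happens under the async spin lock.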