@@ -322,16 +322,6 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
 	if (err)
 		goto out;
 
-	/*
-	 * The program has been safely inserted, decrement the reference count
-	 * so it doesn't interfere with the number of actual user handles.
-	 * This is safe to do because:
-	 * - we overrite the put_ptr in the prog fd map
-	 * - we also have a cleanup function that monitors when a program gets
-	 *   released and we manually do the cleanup in the prog fd map
-	 */
-	bpf_prog_sub(prog, 1);
-
 	/* return the index */
 	err = index;
 
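The hunk above can drop the manual refcount adjustment because ownership of the reference taken by bpf_prog_get() at attach time now moves to the bpf_link introduced further down: the generic link teardown puts the program once the last fd goes away. A simplified sketch of the teardown ordering this relies on (assumed from the bpf_link core in kernel/bpf/syscall.c, not code from this patch):

static void example_bpf_link_free(struct bpf_link *link)
{
	/* hid_bpf_link_release(): clears the jmp_table enabled bit */
	link->ops->release(link);
	/* drops the reference handed over via bpf_link_init() */
	if (link->prog)
		bpf_prog_put(link->prog);
	/* hid_bpf_link_dealloc(): kfree() of the containing struct */
	link->ops->dealloc(link);
}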
@@ -365,14 +355,46 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
 	return prog_type;
 }
 
+static void hid_bpf_link_release(struct bpf_link *link)
+{
+	struct hid_bpf_link *hid_link =
+		container_of(link, struct hid_bpf_link, link);
+
+	__clear_bit(hid_link->hid_table_index, jmp_table.enabled);
+	schedule_work(&release_work);
+}
+
+static void hid_bpf_link_dealloc(struct bpf_link *link)
+{
+	struct hid_bpf_link *hid_link =
+		container_of(link, struct hid_bpf_link, link);
+
+	kfree(hid_link);
+}
+
+static void hid_bpf_link_show_fdinfo(const struct bpf_link *link,
+				     struct seq_file *seq)
+{
+	seq_printf(seq,
+		   "attach_type:\tHID-BPF\n");
+}
+
+static const struct bpf_link_ops hid_bpf_link_lops = {
+	.release = hid_bpf_link_release,
+	.dealloc = hid_bpf_link_dealloc,
+	.show_fdinfo = hid_bpf_link_show_fdinfo,
+};
+
 /* called from syscall */
 noinline int
 __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
 		      int prog_fd, __u32 flags)
 {
+	struct bpf_link_primer link_primer;
+	struct hid_bpf_link *link;
 	struct bpf_prog *prog = NULL;
 	struct hid_bpf_prog_entry *prog_entry;
-	int cnt, err = -EINVAL, prog_idx = -1;
+	int cnt, err = -EINVAL, prog_table_idx = -1;
 
 	/* take a ref on the prog itself */
 	prog = bpf_prog_get(prog_fd);
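struct hid_bpf_link itself is not visible in these hunks; from the container_of() calls and the hid_table_index assignment later in the patch, its assumed shape is just an embedded bpf_link plus the owned jmp_table slot:

/* assumed definition (lives elsewhere in the series); only the two
 * fields these hunks actually touch are shown */
struct hid_bpf_link {
	struct bpf_link link;
	int hid_table_index;
};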
@@ -381,23 +403,32 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
 
 	mutex_lock(&hid_bpf_attach_lock);
 
+	link = kzalloc(sizeof(*link), GFP_USER);
+	if (!link) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+
+	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
+		      &hid_bpf_link_lops, prog);
+
 	/* do not attach too many programs to a given HID device */
 	cnt = hid_bpf_program_count(hdev, NULL, prog_type);
 	if (cnt < 0) {
 		err = cnt;
-		goto out_unlock;
+		goto err_unlock;
 	}
 
 	if (cnt >= hid_bpf_max_programs(prog_type)) {
 		err = -E2BIG;
-		goto out_unlock;
+		goto err_unlock;
 	}
 
-	prog_idx = hid_bpf_insert_prog(prog_fd, prog);
+	prog_table_idx = hid_bpf_insert_prog(prog_fd, prog);
 	/* if the jmp table is full, abort */
-	if (prog_idx < 0) {
-		err = prog_idx;
-		goto out_unlock;
+	if (prog_table_idx < 0) {
+		err = prog_table_idx;
+		goto err_unlock;
 	}
 
 	if (flags & HID_BPF_FLAG_INSERT_HEAD) {
@@ -412,22 +443,32 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
 
 	/* we steal the ref here */
 	prog_entry->prog = prog;
-	prog_entry->idx = prog_idx;
+	prog_entry->idx = prog_table_idx;
 	prog_entry->hdev = hdev;
 	prog_entry->type = prog_type;
 
 	/* finally store the index in the device list */
 	err = hid_bpf_populate_hdev(hdev, prog_type);
+	if (err) {
+		hid_bpf_release_prog_at(prog_table_idx);
+		goto err_unlock;
+	}
+
+	link->hid_table_index = prog_table_idx;
+
+	err = bpf_link_prime(&link->link, &link_primer);
 	if (err)
-		hid_bpf_release_prog_at(prog_idx);
+		goto err_unlock;
 
- out_unlock:
 	mutex_unlock(&hid_bpf_attach_lock);
 
-	/* we only use prog as a key in the various tables, so we don't need to actually
-	 * increment the ref count.
-	 */
+	return bpf_link_settle(&link_primer);
+
+ err_unlock:
+	mutex_unlock(&hid_bpf_attach_lock);
+
 	bpf_prog_put(prog);
+	kfree(link);
 
 	return err;
 }
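The attach path now follows the standard two-step link publication: bpf_link_prime() reserves an anonymous fd and link ID without exposing them, so every failure up to that point can still back out with bpf_prog_put() plus a plain kfree(), and bpf_link_settle() only makes the fd visible once nothing can fail. From userspace, attaching now yields a file descriptor whose lifetime controls the attachment. A hypothetical sketch (hid_attach() stands in for whatever syscall-program wrapper ends up calling __hid_bpf_attach_prog(); its name and signature are assumptions, not part of this patch):

#include <unistd.h>

/* assumed wrapper around the HID-BPF attach entry point */
extern int hid_attach(unsigned int hid_id, int prog_fd, unsigned int flags);

int attach_then_detach(unsigned int hid_id, int prog_fd)
{
	int link_fd = hid_attach(hid_id, prog_fd, 0);

	if (link_fd < 0)
		return link_fd;

	/* ... the program processes events while link_fd stays open ... */

	close(link_fd); /* last reference gone: hid_bpf_link_release() runs */
	return 0;
}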
@@ -460,36 +501,10 @@ void __hid_bpf_destroy_device(struct hid_device *hdev)
 
 void call_hid_bpf_prog_put_deferred(struct work_struct *work)
 {
-	struct bpf_prog_aux *aux;
-	struct bpf_prog *prog;
-	bool found = false;
-	int i;
-
-	aux = container_of(work, struct bpf_prog_aux, work);
-	prog = aux->prog;
-
-	/* we don't need locking here because the entries in the progs table
-	 * are stable:
-	 * if there are other users (and the progs entries might change), we
-	 * would simply not have been called.
-	 */
-	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
-		if (jmp_table.progs[i] == prog) {
-			__clear_bit(i, jmp_table.enabled);
-			found = true;
-		}
-	}
-
-	if (found)
-		/* schedule release of all detached progs */
-		schedule_work(&release_work);
-}
-
-static void hid_bpf_prog_fd_array_put_ptr(void *ptr)
-{
+	/* kept around for patch readability, to be dropped in the next commit */
 }
 
-#define HID_BPF_PROGS_COUNT 2
+#define HID_BPF_PROGS_COUNT 1
 
 static struct bpf_link *links[HID_BPF_PROGS_COUNT];
 static struct entrypoints_bpf *skel;
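Detach detection used to be inferred: a tracing entrypoint on bpf_prog_put_deferred() noticed the last user handle going away and cleared the matching jmp_table bits, hence the now-gutted worker above. With explicit links the signal comes from hid_bpf_link_release(), so all the deferred worker has left to do is reap disabled slots, roughly along these lines (a hypothetical sketch; the real release_work callback lives elsewhere in this file and may do more):

static void hid_bpf_release_progs_sketch(struct work_struct *work)
{
	int i;

	/* reap every slot whose enabled bit hid_bpf_link_release() cleared;
	 * deleting the map element now drops the program reference, since
	 * the map_fd_put_ptr override is gone */
	for (i = 0; i < HID_BPF_MAX_PROGS; i++)
		if (jmp_table.progs[i] && !test_bit(i, jmp_table.enabled))
			hid_bpf_release_prog_at(i);
}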
@@ -528,8 +543,6 @@ void hid_bpf_free_links_and_skel(void)
 		idx++;						\
 	} while (0)
 
-static struct bpf_map_ops hid_bpf_prog_fd_maps_ops;
-
 int hid_bpf_preload_skel(void)
 {
 	int err, idx = 0;
@@ -548,14 +561,7 @@ int hid_bpf_preload_skel(void)
 		goto out;
 	}
 
-	/* our jump table is stealing refs, so we should not decrement on removal of elements */
-	hid_bpf_prog_fd_maps_ops = *jmp_table.map->ops;
-	hid_bpf_prog_fd_maps_ops.map_fd_put_ptr = hid_bpf_prog_fd_array_put_ptr;
-
-	jmp_table.map->ops = &hid_bpf_prog_fd_maps_ops;
-
 	ATTACH_AND_STORE_LINK(hid_tail_call);
-	ATTACH_AND_STORE_LINK(hid_bpf_prog_put_deferred);
 
 	return 0;
 out:
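With the jmp_table no longer stealing references, the stock prog_array element destructor can run unmodified, which is why both the map ops override and the second preloaded entrypoint disappear (and HID_BPF_PROGS_COUNT drops to 1). For reference, the default behavior that now applies when an element is deleted is essentially this (simplified from kernel/bpf/arraymap.c):

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}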