@@ -2319,14 +2319,12 @@ static int deliver_event(struct devx_event_subscription *event_sub,
 
 	if (ev_file->omit_data) {
 		spin_lock_irqsave(&ev_file->lock, flags);
-		if (!list_empty(&event_sub->event_list)) {
+		if (!list_empty(&event_sub->event_list) ||
+		    ev_file->is_destroyed) {
 			spin_unlock_irqrestore(&ev_file->lock, flags);
 			return 0;
 		}
 
-		/* is_destroyed is ignored here because we don't have any memory
-		 * allocation to clean up for the omit_data case
-		 */
 		list_add_tail(&event_sub->event_list, &ev_file->event_list);
 		spin_unlock_irqrestore(&ev_file->lock, flags);
 		wake_up_interruptible(&ev_file->poll_wait);
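This first hunk makes deliver_event() honor is_destroyed in the omit_data path. The flag is tested under ev_file->lock, the same lock the destroy path holds while draining the list, so a subscription can no longer be linked onto a file that is being torn down. The removed comment dated from when destroy had nothing to clean up for omit_data; once destroy starts unlinking queued subscriptions (see the last hunks), ignoring the flag would let this code re-queue an entry after the drain. A minimal sketch of the pattern follows; ev_file, queue_event, and node are hypothetical names for illustration, not the driver's API:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical, trimmed-down event file; illustration only. */
struct ev_file {
	spinlock_t lock;		/* protects event_list and is_destroyed */
	struct list_head event_list;
	u8 is_destroyed:1;
	wait_queue_head_t poll_wait;
};

/*
 * Link @node onto the pending list unless it is already linked or the
 * file is being destroyed. Both tests run under ev_file->lock -- the
 * same lock the destroy path holds while setting is_destroyed and
 * draining event_list -- so no entry can slip in after the drain.
 * @node must have been set up with INIT_LIST_HEAD() (and unlinked with
 * list_del_init()), so that list_empty(node) means "not queued".
 */
static int queue_event(struct ev_file *ev_file, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!list_empty(node) || ev_file->is_destroyed) {
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return 0;
	}
	list_add_tail(node, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
}
```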
@@ -2473,11 +2471,11 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
 			return -ERESTARTSYS;
 		}
 
-		if (list_empty(&ev_queue->event_list) &&
-		    ev_queue->is_destroyed)
-			return -EIO;
-
 		spin_lock_irq(&ev_queue->lock);
+		if (ev_queue->is_destroyed) {
+			spin_unlock_irq(&ev_queue->lock);
+			return -EIO;
+		}
 	}
 
 	event = list_entry(ev_queue->event_list.next,
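Here the is_destroyed test in devx_async_cmd_event_read() moves from outside ev_queue->lock to inside it. Outside the lock, the test was not ordered against the destroy path, which sets the flag and frees the queued entries under that same lock; a reader could observe a stale !is_destroyed and go on to dequeue an entry that destroy was about to free. Re-checking after retaking the lock closes that window. A sketch of the resulting blocking-read shape, again with hypothetical names (wait_for_event, ev_queue):

```c
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct ev_queue {			/* illustrative, not the driver's type */
	spinlock_t lock;
	struct list_head event_list;
	u8 is_destroyed:1;
	wait_queue_head_t poll_wait;
};

/* Returns 0 with q->lock held and the list non-empty, or an error. */
static int wait_for_event(struct ev_queue *q, struct file *filp)
{
	spin_lock_irq(&q->lock);
	while (list_empty(&q->event_list)) {
		spin_unlock_irq(&q->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(q->poll_wait,
					     !list_empty(&q->event_list) ||
					     q->is_destroyed))
			return -ERESTARTSYS;

		spin_lock_irq(&q->lock);
		/* Re-check under the lock: destroy sets the flag and
		 * drains the list in one critical section, so this
		 * test cannot race with it. */
		if (q->is_destroyed) {
			spin_unlock_irq(&q->lock);
			return -EIO;
		}
	}
	return 0;	/* lock held; caller dequeues the first entry */
}
```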
@@ -2551,10 +2549,6 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
 		return -EOVERFLOW;
 	}
 
-	if (ev_file->is_destroyed) {
-		spin_unlock_irq(&ev_file->lock);
-		return -EIO;
-	}
 
 	while (list_empty(&ev_file->event_list)) {
 		spin_unlock_irq(&ev_file->lock);
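Dropping this early -EIO return in devx_async_event_read() follows from the reworked destroy flow rather than standing alone: once teardown drains event_list while holding ev_file->lock (last two hunks), observing is_destroyed implies the list is already empty, so a reader lands in the wait loop anyway, and that appears to be where the destroyed state is handled, under the lock, in the same shape as the previous hunk. The removed check was redundant with that path and one more place that had to stay consistent with it.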
@@ -2667,8 +2661,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
 
 	spin_lock_irq(&comp_ev_file->ev_queue.lock);
 	list_for_each_entry_safe(entry, tmp,
-				 &comp_ev_file->ev_queue.event_list, list)
+				 &comp_ev_file->ev_queue.event_list, list) {
+		list_del(&entry->list);
 		kvfree(entry);
+	}
 	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
 	return 0;
 };
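The added braces and list_del() tighten the command-event queue drain: list_for_each_entry_safe() only protects the iterator, it does not unlink anything. Freeing each entry while it is still linked leaves the list head, and the neighbours of every freed node, pointing at freed memory, so any later traversal walks use-after-free entries. Unlinking as we go keeps the list consistent at every step and leaves it empty. Sketched with an illustrative entry type (ev_entry, drain_queue are hypothetical):

```c
#include <linux/list.h>
#include <linux/mm.h>	/* kvfree() */

struct ev_entry {		/* illustrative payload carrier */
	struct list_head list;
	/* ... event data ... */
};

/* Drain @head, unlinking each entry before freeing it so the list
 * never threads through freed memory. Caller holds the lock that
 * protects @head. */
static void drain_queue(struct list_head *head)
{
	struct ev_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		list_del(&entry->list);
		kvfree(entry);
	}
	/* list_empty(head) now holds. */
}
```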
@@ -2680,11 +2676,29 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
 		container_of(uobj, struct devx_async_event_file,
 			     uobj);
 	struct devx_event_subscription *event_sub, *event_sub_tmp;
-	struct devx_async_event_data *entry, *tmp;
 	struct mlx5_ib_dev *dev = ev_file->dev;
 
 	spin_lock_irq(&ev_file->lock);
 	ev_file->is_destroyed = 1;
+
+	/* free the pending events allocation */
+	if (ev_file->omit_data) {
+		struct devx_event_subscription *event_sub, *tmp;
+
+		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
+					 event_list)
+			list_del_init(&event_sub->event_list);
+
+	} else {
+		struct devx_async_event_data *entry, *tmp;
+
+		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
+					 list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
+	}
+
 	spin_unlock_irq(&ev_file->lock);
 	wake_up_interruptible(&ev_file->poll_wait);
 
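This is the core of the fix: devx_async_event_destroy_uobj() now drains the pending events in the same critical section in which it sets is_destroyed. A concurrent deliver_event() or read therefore sees either a live queue or a dead, already-empty one, never a dead queue whose entries are about to be freed under its feet. The two branches mirror how entries were queued: with omit_data the list links the subscription objects themselves, so there is nothing to free and list_del_init() just unlinks them and re-marks them as unqueued, while without omit_data each devx_async_event_data was allocated at delivery time and must be unlinked and kfree'd. A condensed sketch of the ordering, reusing the hypothetical ev_file and ev_entry types from the sketches above:

```c
#include <linux/slab.h>	/* kfree() */

/* Destroy-side ordering (sketch): mark the file dead and drain its
 * queue in one critical section, then wake readers so they observe
 * is_destroyed and fail with -EIO. A concurrent queue_event() taking
 * the same lock sees either "alive" or "dead and empty". */
static void destroy_ev_file(struct ev_file *ev_file)
{
	struct ev_entry *entry, *tmp;

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	list_for_each_entry_safe(entry, tmp, &ev_file->event_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_irq(&ev_file->lock);
	wake_up_interruptible(&ev_file->poll_wait);
}
```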
@@ -2699,15 +2713,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
 	}
 	mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
-	/* free the pending events allocation */
-	if (!ev_file->omit_data) {
-		spin_lock_irq(&ev_file->lock);
-		list_for_each_entry_safe(entry, tmp,
-					 &ev_file->event_list, list)
-			kfree(entry); /* read can't come any more */
-		spin_unlock_irq(&ev_file->lock);
-	}
-
 	put_device(&dev->ib_dev.dev);
 	return 0;
 };
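Finally, the old cleanup block goes away. It ran only for !omit_data, and only after ev_file->lock had been dropped and retaken, so there was a window between marking the file destroyed and freeing the entries in which deliver_event() or a reader could still touch the list; its /* read can't come any more */ assumption was exactly what the relocated is_destroyed checks had to enforce. It is replaced by the drain that now runs atomically with setting is_destroyed, and the entry/tmp locals dropped from the function header in the previous hunk belonged to this block.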