Skip to content

Commit a8af869

Browse files
yishaih authored and jgunthorpe committed
RDMA/mlx5: Fix async events cleanup flows
As in the prior patch, the devx code is not fully cleaning up its event_lists before finishing driver_destroy, allowing a later read to trigger use-after-free conditions. Re-arrange things so that the event_list is always empty after destroy and ensure it remains empty until the file is closed.

Fixes: f7c8416 ("RDMA/core: Simplify destruction of FD uobjects")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Yishai Hadas <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent a0767da commit a8af869

File tree

1 file changed

+28
-23
lines changed
  • drivers/infiniband/hw/mlx5

1 file changed

+28
-23
lines changed

drivers/infiniband/hw/mlx5/devx.c

Lines changed: 28 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -2319,14 +2319,12 @@ static int deliver_event(struct devx_event_subscription *event_sub,
23192319

23202320
if (ev_file->omit_data) {
23212321
spin_lock_irqsave(&ev_file->lock, flags);
2322-
if (!list_empty(&event_sub->event_list)) {
2322+
if (!list_empty(&event_sub->event_list) ||
2323+
ev_file->is_destroyed) {
23232324
spin_unlock_irqrestore(&ev_file->lock, flags);
23242325
return 0;
23252326
}
23262327

2327-
/* is_destroyed is ignored here because we don't have any memory
2328-
* allocation to clean up for the omit_data case
2329-
*/
23302328
list_add_tail(&event_sub->event_list, &ev_file->event_list);
23312329
spin_unlock_irqrestore(&ev_file->lock, flags);
23322330
wake_up_interruptible(&ev_file->poll_wait);
@@ -2473,11 +2471,11 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
24732471
return -ERESTARTSYS;
24742472
}
24752473

2476-
if (list_empty(&ev_queue->event_list) &&
2477-
ev_queue->is_destroyed)
2478-
return -EIO;
2479-
24802474
spin_lock_irq(&ev_queue->lock);
2475+
if (ev_queue->is_destroyed) {
2476+
spin_unlock_irq(&ev_queue->lock);
2477+
return -EIO;
2478+
}
24812479
}
24822480

24832481
event = list_entry(ev_queue->event_list.next,
@@ -2551,10 +2549,6 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
25512549
return -EOVERFLOW;
25522550
}
25532551

2554-
if (ev_file->is_destroyed) {
2555-
spin_unlock_irq(&ev_file->lock);
2556-
return -EIO;
2557-
}
25582552

25592553
while (list_empty(&ev_file->event_list)) {
25602554
spin_unlock_irq(&ev_file->lock);
@@ -2667,8 +2661,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
26672661

26682662
spin_lock_irq(&comp_ev_file->ev_queue.lock);
26692663
list_for_each_entry_safe(entry, tmp,
2670-
&comp_ev_file->ev_queue.event_list, list)
2664+
&comp_ev_file->ev_queue.event_list, list) {
2665+
list_del(&entry->list);
26712666
kvfree(entry);
2667+
}
26722668
spin_unlock_irq(&comp_ev_file->ev_queue.lock);
26732669
return 0;
26742670
};
@@ -2680,11 +2676,29 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
26802676
container_of(uobj, struct devx_async_event_file,
26812677
uobj);
26822678
struct devx_event_subscription *event_sub, *event_sub_tmp;
2683-
struct devx_async_event_data *entry, *tmp;
26842679
struct mlx5_ib_dev *dev = ev_file->dev;
26852680

26862681
spin_lock_irq(&ev_file->lock);
26872682
ev_file->is_destroyed = 1;
2683+
2684+
/* free the pending events allocation */
2685+
if (ev_file->omit_data) {
2686+
struct devx_event_subscription *event_sub, *tmp;
2687+
2688+
list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2689+
event_list)
2690+
list_del_init(&event_sub->event_list);
2691+
2692+
} else {
2693+
struct devx_async_event_data *entry, *tmp;
2694+
2695+
list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2696+
list) {
2697+
list_del(&entry->list);
2698+
kfree(entry);
2699+
}
2700+
}
2701+
26882702
spin_unlock_irq(&ev_file->lock);
26892703
wake_up_interruptible(&ev_file->poll_wait);
26902704

@@ -2699,15 +2713,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
26992713
}
27002714
mutex_unlock(&dev->devx_event_table.event_xa_lock);
27012715

2702-
/* free the pending events allocation */
2703-
if (!ev_file->omit_data) {
2704-
spin_lock_irq(&ev_file->lock);
2705-
list_for_each_entry_safe(entry, tmp,
2706-
&ev_file->event_list, list)
2707-
kfree(entry); /* read can't come any more */
2708-
spin_unlock_irq(&ev_file->lock);
2709-
}
2710-
27112716
put_device(&dev->ib_dev.dev);
27122717
return 0;
27132718
};

0 commit comments

Comments (0)