Skip to content

Commit e583371

Browse files
committed
drm/vmwgfx: Fix a deadlock in dma buf fence polling
Introduce a version of the fence ops that on release doesn't remove the fence from the pending list, and thus doesn't require a lock to fix poll->fence wait->fence unref deadlocks. vmwgfx overwrites the wait callback to iterate over the list of all fences and update their status, to do that it holds a lock to prevent the list modifcations from other threads. The fence destroy callback both deletes the fence and removes it from the list of pending fences, for which it holds a lock. dma buf polling cb unrefs a fence after it's been signaled: so the poll calls the wait, which signals the fences, which are being destroyed. The destruction tries to acquire the lock on the pending fences list which it can never get because it's held by the wait from which it was called. Old bug, but not a lot of userspace apps were using dma-buf polling interfaces. Fix those, in particular this fixes KDE stalls/deadlock. Signed-off-by: Zack Rusin <[email protected]> Fixes: 2298e80 ("drm/vmwgfx: rework to new fence interface, v2") Cc: Broadcom internal kernel review list <[email protected]> Cc: [email protected] Cc: <[email protected]> # v6.2+ Reviewed-by: Maaz Mombasawala <[email protected]> Reviewed-by: Martin Krastev <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent 445d336 commit e583371

File tree

1 file changed

+7
-10
lines changed

1 file changed

+7
-10
lines changed

drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
 #define VMW_FENCE_WRAP (1 << 31)
 
 struct vmw_fence_manager {
-	int num_fence_objects;
 	struct vmw_private *dev_priv;
 	spinlock_t lock;
 	struct list_head fence_list;
@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
-
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 
-	spin_lock(&fman->lock);
-	list_del_init(&fence->head);
-	--fman->num_fence_objects;
-	spin_unlock(&fman->lock);
+	if (!list_empty(&fence->head)) {
+		spin_lock(&fman->lock);
+		list_del_init(&fence->head);
+		spin_unlock(&fman->lock);
+	}
 	fence->destroy(fence);
 }
 

@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
 	.release = vmw_fence_obj_destroy,
 };
 
-
 /*
  * Execute signal actions on fences recently signaled.
  * This is done from a workqueue so we don't have to execute
@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 		goto out_unlock;
 	}
 	list_add_tail(&fence->head, &fman->fence_list);
-	++fman->num_fence_objects;
 
 out_unlock:
 	spin_unlock(&fman->lock);
@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
403400
u32 passed_seqno)
404401
{
405402
u32 goal_seqno;
406-
struct vmw_fence_obj *fence;
403+
struct vmw_fence_obj *fence, *next_fence;
407404

408405
if (likely(!fman->seqno_valid))
409406
return false;
@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
413410
return false;
414411

415412
fman->seqno_valid = false;
416-
list_for_each_entry(fence, &fman->fence_list, head) {
413+
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
417414
if (!list_empty(&fence->seq_passed_actions)) {
418415
fman->seqno_valid = true;
419416
vmw_fence_goal_write(fman->dev_priv,

0 commit comments

Comments
 (0)