Commit 56f3493

dceraolo authored and gregkh committed
drm/xe: Fix error handling if PXP fails to start
[ Upstream commit ae5fbbd ]

Since the PXP start comes after __xe_exec_queue_init() has completed, we
need to clean up what was done in that function in case of a PXP start
error. __xe_exec_queue_init() calls the submission backend's init()
function, so we need to introduce an opposite for that. Unfortunately,
while we already have a fini() function pointer, it performs other
operations in addition to cleaning up what was done by init(). Therefore,
for clarity, the existing fini() has been renamed to destroy(), while a
new fini() has been added that only cleans up what was done by init(),
with the latter being called by the former (via xe_exec_queue_fini).

Fixes: 72d4796 ("drm/xe/pxp/uapi: Add userspace and LRC support for PXP-using queues")
Signed-off-by: Daniele Ceraolo Spurio <[email protected]>
Cc: John Harrison <[email protected]>
Cc: Matthew Brost <[email protected]>
Reviewed-by: John Harrison <[email protected]>
Signed-off-by: John Harrison <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
(cherry picked from commit 6266673)
Signed-off-by: Rodrigo Vivi <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
1 parent 09b473a commit 56f3493
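
Editor's note: the fix is easiest to see in isolation. Below is a minimal, self-contained C sketch of the unwind pattern the commit applies; the names are illustrative only, not the xe driver code. Once the init step has succeeded, any later failure (here, the PXP start) must jump to a label that undoes init before undoing the allocation.

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int *lrc; /* stands in for the per-queue LRC array */
};

static int queue_init(struct queue *q)
{
	q->lrc = malloc(sizeof(*q->lrc));
	return q->lrc ? 0 : -1;
}

static void queue_fini(struct queue *q)
{
	free(q->lrc); /* undoes exactly what queue_init() did */
}

static int pxp_start(struct queue *q)
{
	(void)q;
	return -1; /* force the error path for the demo */
}

static struct queue *queue_create(void)
{
	struct queue *q = malloc(sizeof(*q));

	if (!q)
		return NULL;
	if (queue_init(q))
		goto err_post_alloc;
	if (pxp_start(q))
		goto err_post_init; /* the fix: unwind init first */

	return q;

err_post_init:
	queue_fini(q);
err_post_alloc:
	free(q);
	return NULL;
}

int main(void)
{
	if (!queue_create())
		puts("create failed, but init was fully unwound");
	return 0;
}

Before the fix, the equivalent of the pxp_start() failure jumped straight to err_post_alloc, leaking everything queue_init() had set up.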

File tree: 6 files changed (+72, -41 lines)

drivers/gpu/drm/xe/xe_exec_queue.c

Lines changed: 15 additions & 7 deletions

@@ -151,6 +151,16 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
 	return err;
 }
 
+static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+	int i;
+
+	q->ops->fini(q);
+
+	for (i = 0; i < q->width; ++i)
+		xe_lrc_put(q->lrc[i]);
+}
+
 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
 					   u32 logical_mask, u16 width,
 					   struct xe_hw_engine *hwe, u32 flags,
@@ -181,11 +191,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
 	if (xe_exec_queue_uses_pxp(q)) {
 		err = xe_pxp_exec_queue_add(xe->pxp, q);
 		if (err)
-			goto err_post_alloc;
+			goto err_post_init;
 	}
 
 	return q;
 
+err_post_init:
+	__xe_exec_queue_fini(q);
 err_post_alloc:
 	__xe_exec_queue_free(q);
 	return ERR_PTR(err);
@@ -283,13 +295,11 @@ void xe_exec_queue_destroy(struct kref *ref)
 		xe_exec_queue_put(eq);
 	}
 
-	q->ops->fini(q);
+	q->ops->destroy(q);
 }
 
 void xe_exec_queue_fini(struct xe_exec_queue *q)
 {
-	int i;
-
 	/*
 	 * Before releasing our ref to lrc and xef, accumulate our run ticks
 	 * and wakeup any waiters.
@@ -298,9 +308,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
 	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
 		wake_up_var(&q->xef->exec_queue.pending_removal);
 
-	for (i = 0; i < q->width; ++i)
-		xe_lrc_put(q->lrc[i]);
-
+	__xe_exec_queue_fini(q);
 	__xe_exec_queue_free(q);
 }
 
drivers/gpu/drm/xe/xe_exec_queue_types.h

Lines changed: 7 additions & 1 deletion

@@ -166,8 +166,14 @@ struct xe_exec_queue_ops {
 	int (*init)(struct xe_exec_queue *q);
 	/** @kill: Kill inflight submissions for backend */
 	void (*kill)(struct xe_exec_queue *q);
-	/** @fini: Fini exec queue for submission backend */
+	/** @fini: Undoes the init() for submission backend */
 	void (*fini)(struct xe_exec_queue *q);
+	/**
+	 * @destroy: Destroy exec queue for submission backend. The backend
+	 * function must call xe_exec_queue_fini() (which will in turn call the
+	 * fini() backend function) to ensure the queue is properly cleaned up.
+	 */
+	void (*destroy)(struct xe_exec_queue *q);
 	/** @set_priority: Set priority for exec queue */
 	int (*set_priority)(struct xe_exec_queue *q,
 			    enum xe_exec_queue_priority priority);
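
Editor's note: the contract documented in the new @destroy kerneldoc can be shown with a small userspace C sketch (hypothetical names, not the driver's API). destroy() is the outer teardown entry point, and every path through it must reach the generic fini routine, which in turn invokes the backend's fini() to undo init().

#include <stdio.h>

struct queue;

struct queue_ops {
	void (*fini)(struct queue *q);    /* undoes init() only */
	void (*destroy)(struct queue *q); /* full teardown entry point */
};

struct queue {
	const struct queue_ops *ops;
};

/* generic layer: mirrors xe_exec_queue_fini() calling ops->fini() */
static void generic_queue_fini(struct queue *q)
{
	q->ops->fini(q);
	puts("generic: put LRC references"); /* stands in for the xe_lrc_put() loop */
}

static void backend_fini(struct queue *q)
{
	(void)q;
	puts("backend: undo init()");
}

static void backend_destroy(struct queue *q)
{
	puts("backend: destroy (may be deferred to a worker)");
	generic_queue_fini(q); /* required: destroy must reach fini */
}

static const struct queue_ops ops = {
	.fini = backend_fini,
	.destroy = backend_destroy,
};

int main(void)
{
	struct queue q = { .ops = &ops };

	q.ops->destroy(&q);
	return 0;
}

This split is what lets the error path in xe_exec_queue_create() call only the inner fini step, while the normal teardown path still runs the full destroy sequence.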

drivers/gpu/drm/xe/xe_execlist.c

Lines changed: 16 additions & 9 deletions

@@ -385,10 +385,20 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
 	return err;
 }
 
-static void execlist_exec_queue_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+	struct xe_execlist_exec_queue *exl = q->execlist;
+
+	drm_sched_entity_fini(&exl->entity);
+	drm_sched_fini(&exl->sched);
+
+	kfree(exl);
+}
+
+static void execlist_exec_queue_destroy_async(struct work_struct *w)
 {
 	struct xe_execlist_exec_queue *ee =
-		container_of(w, struct xe_execlist_exec_queue, fini_async);
+		container_of(w, struct xe_execlist_exec_queue, destroy_async);
 	struct xe_exec_queue *q = ee->q;
 	struct xe_execlist_exec_queue *exl = q->execlist;
 	struct xe_device *xe = gt_to_xe(q->gt);
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
 	list_del(&exl->active_link);
 	spin_unlock_irqrestore(&exl->port->lock, flags);
 
-	drm_sched_entity_fini(&exl->entity);
-	drm_sched_fini(&exl->sched);
-	kfree(exl);
-
 	xe_exec_queue_fini(q);
 }
 
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
 	/* NIY */
 }
 
-static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
 {
-	INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
-	queue_work(system_unbound_wq, &q->execlist->fini_async);
+	INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
+	queue_work(system_unbound_wq, &q->execlist->destroy_async);
 }
 
 static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
 	.init = execlist_exec_queue_init,
 	.kill = execlist_exec_queue_kill,
 	.fini = execlist_exec_queue_fini,
+	.destroy = execlist_exec_queue_destroy,
 	.set_priority = execlist_exec_queue_set_priority,
 	.set_timeslice = execlist_exec_queue_set_timeslice,
 	.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,

drivers/gpu/drm/xe/xe_execlist_types.h

Lines changed: 1 addition & 1 deletion

@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
 
 	bool has_run;
 
-	struct work_struct fini_async;
+	struct work_struct destroy_async;
 
 	enum xe_exec_queue_priority active_priority;
 	struct list_head active_link;

drivers/gpu/drm/xe/xe_guc_exec_queue_types.h

Lines changed: 2 additions & 2 deletions

@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
 	struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
 	/** @lr_tdr: long running TDR worker */
 	struct work_struct lr_tdr;
-	/** @fini_async: do final fini async from this worker */
-	struct work_struct fini_async;
+	/** @destroy_async: do final destroy async from this worker */
+	struct work_struct destroy_async;
 	/** @resume_time: time of last resume */
 	u64 resume_time;
 	/** @state: GuC specific state for this xe_exec_queue */

drivers/gpu/drm/xe/xe_guc_submit.c

Lines changed: 31 additions & 21 deletions

@@ -1269,48 +1269,57 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+	struct xe_guc_exec_queue *ge = q->guc;
+	struct xe_guc *guc = exec_queue_to_guc(q);
+
+	release_guc_id(guc, q);
+	xe_sched_entity_fini(&ge->entity);
+	xe_sched_fini(&ge->sched);
+
+	/*
+	 * RCU free due sched being exported via DRM scheduler fences
+	 * (timeline name).
+	 */
+	kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
 {
 	struct xe_guc_exec_queue *ge =
-		container_of(w, struct xe_guc_exec_queue, fini_async);
+		container_of(w, struct xe_guc_exec_queue, destroy_async);
 	struct xe_exec_queue *q = ge->q;
 	struct xe_guc *guc = exec_queue_to_guc(q);
 
 	xe_pm_runtime_get(guc_to_xe(guc));
 	trace_xe_exec_queue_destroy(q);
 
-	release_guc_id(guc, q);
 	if (xe_exec_queue_is_lr(q))
 		cancel_work_sync(&ge->lr_tdr);
 	/* Confirm no work left behind accessing device structures */
 	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
-	xe_sched_entity_fini(&ge->entity);
-	xe_sched_fini(&ge->sched);
 
-	/*
-	 * RCU free due sched being exported via DRM scheduler fences
-	 * (timeline name).
-	 */
-	kfree_rcu(ge, rcu);
 	xe_exec_queue_fini(q);
+
 	xe_pm_runtime_put(guc_to_xe(guc));
 }
 
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
 
-	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+	INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
 
 	/* We must block on kernel engines so slabs are empty on driver unload */
 	if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
-		__guc_exec_queue_fini_async(&q->guc->fini_async);
+		__guc_exec_queue_destroy_async(&q->guc->destroy_async);
 	else
-		queue_work(xe->destroy_wq, &q->guc->fini_async);
+		queue_work(xe->destroy_wq, &q->guc->destroy_async);
 }
 
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	/*
 	 * Might be done from within the GPU scheduler, need to do async as we
@@ -1319,7 +1328,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
 	 * this we and don't really care when everything is fini'd, just that it
 	 * is.
 	 */
-	guc_exec_queue_fini_async(q);
+	guc_exec_queue_destroy_async(q);
 }
 
 static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@@ -1333,7 +1342,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
 	if (exec_queue_registered(q))
 		disable_scheduling_deregister(guc, q);
 	else
-		__guc_exec_queue_fini(guc, q);
+		__guc_exec_queue_destroy(guc, q);
 }
 
 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@@ -1566,14 +1575,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
 #define STATIC_MSG_CLEANUP 0
 #define STATIC_MSG_SUSPEND 1
 #define STATIC_MSG_RESUME 2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
 {
 	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
 
 	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
 		guc_exec_queue_add_msg(q, msg, CLEANUP);
 	else
-		__guc_exec_queue_fini(exec_queue_to_guc(q), q);
+		__guc_exec_queue_destroy(exec_queue_to_guc(q), q);
 }
 
 static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1703,6 +1712,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
 	.init = guc_exec_queue_init,
 	.kill = guc_exec_queue_kill,
 	.fini = guc_exec_queue_fini,
+	.destroy = guc_exec_queue_destroy,
 	.set_priority = guc_exec_queue_set_priority,
 	.set_timeslice = guc_exec_queue_set_timeslice,
 	.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@@ -1724,7 +1734,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
 		if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
 			xe_exec_queue_put(q);
 		else if (exec_queue_destroyed(q))
-			__guc_exec_queue_fini(guc, q);
+			__guc_exec_queue_destroy(guc, q);
 	}
 	if (q->guc->suspend_pending) {
 		set_exec_queue_suspended(q);
@@ -1981,7 +1991,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
 	if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
 		xe_exec_queue_put(q);
 	else
-		__guc_exec_queue_fini(guc, q);
+		__guc_exec_queue_destroy(guc, q);
 }
 
 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
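
Editor's note: both backends push the final teardown to a worker because destroy() can be invoked from scheduler context, where the blocking cleanup is not allowed. As a rough userspace analogy, with POSIX threads standing in for the kernel workqueue and all names invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int id;
};

/* worker body: plays the role of __guc_exec_queue_destroy_async() */
static void *destroy_async(void *arg)
{
	struct queue *q = arg;

	printf("worker: cancel pending work, fini queue %d, free it\n", q->id);
	free(q); /* the final free happens off the caller's context */
	return NULL;
}

/* plays the role of INIT_WORK() + queue_work() in the destroy() op */
static pthread_t queue_destroy(struct queue *q)
{
	pthread_t worker;

	pthread_create(&worker, NULL, destroy_async, q);
	return worker;
}

int main(void)
{
	struct queue *q = malloc(sizeof(*q));
	pthread_t worker;

	if (!q)
		return 1;
	q->id = 1;
	worker = queue_destroy(q); /* q must not be touched after this call */
	pthread_join(worker, NULL);
	return 0;
}

The rename from fini_async to destroy_async in the diff above reflects exactly this: the deferred worker performs the full destroy, and only the new, synchronous fini() remains as the inverse of init().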
