Skip to content

Commit b634acb

Browse files
committed
Merge tag 'drm-misc-fixes-2024-10-10' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes
Short summary of fixes pull:
 fbdev-dma: - Only clean up deferred I/O if instantiated
 nouveau: - dmem: Fix privileged error in copy engine channel; Fix possible data leak in migrate_to_ram() - gsp: Fix coding style
 sched: - Avoid leaking lockdep map
 v3d: - Stop active perfmon before destroying it
 vc4: - Stop active perfmon before destroying it
 xe: - Drop GuC submit_wq pool
Signed-off-by: Dave Airlie <[email protected]>
From: Thomas Zimmermann <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
2 parents fe4a435 + fcddc71 commit b634acb

File tree

9 files changed

+31
-88
lines changed

9 files changed

+31
-88
lines changed

drivers/gpu/drm/drm_fbdev_dma.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
5050
if (!fb_helper->dev)
5151
return;
5252

53-
fb_deferred_io_cleanup(info);
53+
if (info->fbdefio)
54+
fb_deferred_io_cleanup(info);
5455
drm_fb_helper_fini(fb_helper);
5556

5657
drm_client_buffer_vunmap(fb_helper->buffer);

drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ struct nvkm_gsp {
210210
} *rm;
211211

212212
struct {
213-
struct mutex mutex;;
213+
struct mutex mutex;
214214
struct idr idr;
215215
} client_id;
216216

drivers/gpu/drm/nouveau/nouveau_dmem.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
193193
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
194194
goto done;
195195

196-
dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
196+
dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
197197
if (!dpage)
198198
goto done;
199199

drivers/gpu/drm/nouveau/nouveau_drm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
331331
return;
332332
}
333333

334-
ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
334+
ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
335335
if (ret)
336336
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
337337
}

drivers/gpu/drm/scheduler/sched_main.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,12 @@
8787
#define CREATE_TRACE_POINTS
8888
#include "gpu_scheduler_trace.h"
8989

90+
#ifdef CONFIG_LOCKDEP
91+
static struct lockdep_map drm_sched_lockdep_map = {
92+
.name = "drm_sched_lockdep_map"
93+
};
94+
#endif
95+
9096
#define to_drm_sched_job(sched_job) \
9197
container_of((sched_job), struct drm_sched_job, queue_node)
9298

@@ -1269,7 +1275,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
12691275
sched->submit_wq = submit_wq;
12701276
sched->own_submit_wq = false;
12711277
} else {
1278+
#ifdef CONFIG_LOCKDEP
1279+
sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
1280+
&drm_sched_lockdep_map);
1281+
#else
12721282
sched->submit_wq = alloc_ordered_workqueue(name, 0);
1283+
#endif
12731284
if (!sched->submit_wq)
12741285
return -ENOMEM;
12751286

drivers/gpu/drm/v3d/v3d_perfmon.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
306306
static int v3d_perfmon_idr_del(int id, void *elem, void *data)
307307
{
308308
struct v3d_perfmon *perfmon = elem;
309+
struct v3d_dev *v3d = (struct v3d_dev *)data;
310+
311+
/* If the active perfmon is being destroyed, stop it first */
312+
if (perfmon == v3d->active_perfmon)
313+
v3d_perfmon_stop(v3d, perfmon, false);
309314

310315
v3d_perfmon_put(perfmon);
311316

@@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
314319

315320
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
316321
{
322+
struct v3d_dev *v3d = v3d_priv->v3d;
323+
317324
mutex_lock(&v3d_priv->perfmon.lock);
318-
idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
325+
idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
319326
idr_destroy(&v3d_priv->perfmon.idr);
320327
mutex_unlock(&v3d_priv->perfmon.lock);
321328
mutex_destroy(&v3d_priv->perfmon.lock);

drivers/gpu/drm/vc4/vc4_perfmon.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
116116
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
117117
{
118118
struct vc4_perfmon *perfmon = elem;
119+
struct vc4_dev *vc4 = (struct vc4_dev *)data;
120+
121+
/* If the active perfmon is being destroyed, stop it first */
122+
if (perfmon == vc4->active_perfmon)
123+
vc4_perfmon_stop(vc4, perfmon, false);
119124

120125
vc4_perfmon_put(perfmon);
121126

@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
130135
return;
131136

132137
mutex_lock(&vc4file->perfmon.lock);
133-
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
138+
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
134139
idr_destroy(&vc4file->perfmon.idr);
135140
mutex_unlock(&vc4file->perfmon.lock);
136141
mutex_destroy(&vc4file->perfmon.lock);

drivers/gpu/drm/xe/xe_guc_submit.c

Lines changed: 1 addition & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -224,80 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
224224
EXEC_QUEUE_STATE_BANNED));
225225
}
226226

227-
#ifdef CONFIG_PROVE_LOCKING
228-
static int alloc_submit_wq(struct xe_guc *guc)
229-
{
230-
int i;
231-
232-
for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
233-
guc->submission_state.submit_wq_pool[i] =
234-
alloc_ordered_workqueue("submit_wq", 0);
235-
if (!guc->submission_state.submit_wq_pool[i])
236-
goto err_free;
237-
}
238-
239-
return 0;
240-
241-
err_free:
242-
while (i)
243-
destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
244-
245-
return -ENOMEM;
246-
}
247-
248-
static void free_submit_wq(struct xe_guc *guc)
249-
{
250-
int i;
251-
252-
for (i = 0; i < NUM_SUBMIT_WQ; ++i)
253-
destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
254-
}
255-
256-
static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
257-
{
258-
int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
259-
260-
return guc->submission_state.submit_wq_pool[idx];
261-
}
262-
#else
263-
static int alloc_submit_wq(struct xe_guc *guc)
264-
{
265-
return 0;
266-
}
267-
268-
static void free_submit_wq(struct xe_guc *guc)
269-
{
270-
271-
}
272-
273-
static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
274-
{
275-
return NULL;
276-
}
277-
#endif
278-
279-
static void xe_guc_submit_fini(struct xe_guc *guc)
280-
{
281-
struct xe_device *xe = guc_to_xe(guc);
282-
struct xe_gt *gt = guc_to_gt(guc);
283-
int ret;
284-
285-
ret = wait_event_timeout(guc->submission_state.fini_wq,
286-
xa_empty(&guc->submission_state.exec_queue_lookup),
287-
HZ * 5);
288-
289-
drain_workqueue(xe->destroy_wq);
290-
291-
xe_gt_assert(gt, ret);
292-
}
293-
294227
static void guc_submit_fini(struct drm_device *drm, void *arg)
295228
{
296229
struct xe_guc *guc = arg;
297230

298-
xe_guc_submit_fini(guc);
299231
xa_destroy(&guc->submission_state.exec_queue_lookup);
300-
free_submit_wq(guc);
301232
}
302233

303234
static void guc_submit_wedged_fini(void *arg)
@@ -359,10 +290,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
359290
if (err)
360291
return err;
361292

362-
err = alloc_submit_wq(guc);
363-
if (err)
364-
return err;
365-
366293
gt->exec_queue_ops = &guc_exec_queue_ops;
367294

368295
xa_init(&guc->submission_state.exec_queue_lookup);
@@ -1482,8 +1409,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
14821409
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
14831410
msecs_to_jiffies(q->sched_props.job_timeout_ms);
14841411
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
1485-
get_submit_wq(guc),
1486-
q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
1412+
NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
14871413
timeout, guc_to_gt(guc)->ordered_wq, NULL,
14881414
q->name, gt_to_xe(q->gt)->drm.dev);
14891415
if (err)

drivers/gpu/drm/xe/xe_guc_types.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -72,13 +72,6 @@ struct xe_guc {
7272
atomic_t stopped;
7373
/** @submission_state.lock: protects submission state */
7474
struct mutex lock;
75-
#ifdef CONFIG_PROVE_LOCKING
76-
#define NUM_SUBMIT_WQ 256
77-
/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
78-
struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
79-
/** @submission_state.submit_wq_idx: submission ordered workqueue index */
80-
int submit_wq_idx;
81-
#endif
8275
/** @submission_state.enabled: submission is enabled */
8376
bool enabled;
8477
/** @submission_state.fini_wq: submit fini wait queue */

0 commit comments

Comments
 (0)