Skip to content

Commit 475be51

Browse files
committed
Merge tag 'drm-misc-fixes-2024-10-02' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes
Short summary of fixes pull: panthor: - Set FOP_UNSIGNED_OFFSET in fops instance - Acquire lock in panthor_vm_prepare_map_op_ctx() - Avoid uninitialized variable in tick_ctx_cleanup() - Do not block scheduler queue if work is pending - Do not add write fences to the shared BOs scheduler: - Fix locking in drm_sched_entity_modify_sched() - Fix pointer deref if entity queue changes Signed-off-by: Dave Airlie <[email protected]> From: Thomas Zimmermann <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
2 parents 156cc37 + f9e7ac6 commit 475be51

File tree

4 files changed

+30
-11
lines changed

4 files changed

+30
-11
lines changed

drivers/gpu/drm/panthor/panthor_drv.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = {
13831383
.read = drm_read,
13841384
.llseek = noop_llseek,
13851385
.mmap = panthor_mmap,
1386+
.fop_flags = FOP_UNSIGNED_OFFSET,
13861387
};
13871388

13881389
#ifdef CONFIG_DEBUG_FS

drivers/gpu/drm/panthor/panthor_mmu.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1251,9 +1251,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
12511251
goto err_cleanup;
12521252
}
12531253

1254+
/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
1255+
* pre-allocated BO if the <BO,VM> association exists. Given we
1256+
* only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
1257+
* be called immediately, and we have to hold the VM resv lock when
1258+
* calling this function.
1259+
*/
1260+
dma_resv_lock(panthor_vm_resv(vm), NULL);
12541261
mutex_lock(&bo->gpuva_list_lock);
12551262
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
12561263
mutex_unlock(&bo->gpuva_list_lock);
1264+
dma_resv_unlock(panthor_vm_resv(vm));
12571265

12581266
/* If the a vm_bo for this <VM,BO> combination exists, it already
12591267
* retains a pin ref, and we can release the one we took earlier.

drivers/gpu/drm/panthor/panthor_sched.c

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1103,7 +1103,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs
11031103
list_move_tail(&group->wait_node,
11041104
&group->ptdev->scheduler->groups.waiting);
11051105
}
1106-
group->blocked_queues |= BIT(cs_id);
1106+
1107+
/* The queue is only blocked if there's no deferred operation
1108+
* pending, which can be checked through the scoreboard status.
1109+
*/
1110+
if (!cs_iface->output->status_scoreboards)
1111+
group->blocked_queues |= BIT(cs_id);
1112+
11071113
queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
11081114
queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
11091115
status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
@@ -2046,6 +2052,7 @@ static void
20462052
tick_ctx_cleanup(struct panthor_scheduler *sched,
20472053
struct panthor_sched_tick_ctx *ctx)
20482054
{
2055+
struct panthor_device *ptdev = sched->ptdev;
20492056
struct panthor_group *group, *tmp;
20502057
u32 i;
20512058

@@ -2054,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
20542061
/* If everything went fine, we should only have groups
20552062
* to be terminated in the old_groups lists.
20562063
*/
2057-
drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
2064+
drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
20582065
group_can_run(group));
20592066

20602067
if (!group_can_run(group)) {
@@ -2077,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
20772084
/* If everything went fine, the groups to schedule lists should
20782085
* be empty.
20792086
*/
2080-
drm_WARN_ON(&group->ptdev->base,
2087+
drm_WARN_ON(&ptdev->base,
20812088
!ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
20822089

20832090
list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
@@ -3436,13 +3443,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched
34363443
{
34373444
struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
34383445

3439-
/* Still not sure why we want USAGE_WRITE for external objects, since I
3440-
* was assuming this would be handled through explicit syncs being imported
3441-
* to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
3442-
* seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
3443-
*/
34443446
panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3445-
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
3447+
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
34463448
}
34473449

34483450
void panthor_sched_unplug(struct panthor_device *ptdev)

drivers/gpu/drm/scheduler/sched_entity.c

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
133133
{
134134
WARN_ON(!num_sched_list || !sched_list);
135135

136+
spin_lock(&entity->rq_lock);
136137
entity->sched_list = sched_list;
137138
entity->num_sched_list = num_sched_list;
139+
spin_unlock(&entity->rq_lock);
138140
}
139141
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
140142

@@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
597599

598600
/* first job wakes up scheduler */
599601
if (first) {
602+
struct drm_gpu_scheduler *sched;
603+
struct drm_sched_rq *rq;
604+
600605
/* Add the entity to the run queue */
601606
spin_lock(&entity->rq_lock);
602607
if (entity->stopped) {
@@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
606611
return;
607612
}
608613

609-
drm_sched_rq_add_entity(entity->rq, entity);
614+
rq = entity->rq;
615+
sched = rq->sched;
616+
617+
drm_sched_rq_add_entity(rq, entity);
610618
spin_unlock(&entity->rq_lock);
611619

612620
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
613621
drm_sched_rq_update_fifo(entity, submit_ts);
614622

615-
drm_sched_wakeup(entity->rq->sched);
623+
drm_sched_wakeup(sched);
616624
}
617625
}
618626
EXPORT_SYMBOL(drm_sched_entity_push_job);

0 commit comments

Comments
 (0)