Skip to content

Commit f93126f

Browse files
Tvrtko Ursulin authored and Philipp Stanner committed
drm/sched: Re-group and rename the entity run-queue lock
When writing to a drm_sched_entity's run-queue, writers are protected
through the lock drm_sched_entity.rq_lock.

This naming, however, frequently collides with the separate internal
lock of struct drm_sched_rq, resulting in uses like this:

	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

Rename drm_sched_entity.rq_lock to improve readability. While at it,
re-order that struct's members to make it more obvious what the lock
protects.

v2:
 * Rename some rq_lock straddlers in kerneldoc, improve commit text. (Philipp)

Signed-off-by: Tvrtko Ursulin <[email protected]>
Suggested-by: Christian König <[email protected]>
Cc: Alex Deucher <[email protected]>
Cc: Luben Tuikov <[email protected]>
Cc: Matthew Brost <[email protected]>
Cc: Philipp Stanner <[email protected]>
Reviewed-by: Christian König <[email protected]>
[pstanner: Fix typo in docstring]
Signed-off-by: Philipp Stanner <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent a6f4628 commit f93126f

File tree

3 files changed

+26
-25
lines changed

3 files changed

+26
-25
lines changed

drivers/gpu/drm/scheduler/sched_entity.c

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
105105
/* We start in an idle state. */
106106
complete_all(&entity->entity_idle);
107107

108-
spin_lock_init(&entity->rq_lock);
108+
spin_lock_init(&entity->lock);
109109
spsc_queue_init(&entity->job_queue);
110110

111111
atomic_set(&entity->fence_seq, 0);
@@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
133133
{
134134
WARN_ON(!num_sched_list || !sched_list);
135135

136-
spin_lock(&entity->rq_lock);
136+
spin_lock(&entity->lock);
137137
entity->sched_list = sched_list;
138138
entity->num_sched_list = num_sched_list;
139-
spin_unlock(&entity->rq_lock);
139+
spin_unlock(&entity->lock);
140140
}
141141
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
142142

@@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
244244
if (!entity->rq)
245245
return;
246246

247-
spin_lock(&entity->rq_lock);
247+
spin_lock(&entity->lock);
248248
entity->stopped = true;
249249
drm_sched_rq_remove_entity(entity->rq, entity);
250-
spin_unlock(&entity->rq_lock);
250+
spin_unlock(&entity->lock);
251251

252252
/* Make sure this entity is not used by the scheduler at the moment */
253253
wait_for_completion(&entity->entity_idle);
@@ -396,9 +396,9 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
396396
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
397397
enum drm_sched_priority priority)
398398
{
399-
spin_lock(&entity->rq_lock);
399+
spin_lock(&entity->lock);
400400
entity->priority = priority;
401-
spin_unlock(&entity->rq_lock);
401+
spin_unlock(&entity->lock);
402402
}
403403
EXPORT_SYMBOL(drm_sched_entity_set_priority);
404404

@@ -515,10 +515,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
515515

516516
next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
517517
if (next) {
518-
spin_lock(&entity->rq_lock);
518+
spin_lock(&entity->lock);
519519
drm_sched_rq_update_fifo_locked(entity,
520520
next->submit_ts);
521-
spin_unlock(&entity->rq_lock);
521+
spin_unlock(&entity->lock);
522522
}
523523
}
524524

@@ -559,14 +559,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
559559
if (fence && !dma_fence_is_signaled(fence))
560560
return;
561561

562-
spin_lock(&entity->rq_lock);
562+
spin_lock(&entity->lock);
563563
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
564564
rq = sched ? sched->sched_rq[entity->priority] : NULL;
565565
if (rq != entity->rq) {
566566
drm_sched_rq_remove_entity(entity->rq, entity);
567567
entity->rq = rq;
568568
}
569-
spin_unlock(&entity->rq_lock);
569+
spin_unlock(&entity->lock);
570570

571571
if (entity->num_sched_list == 1)
572572
entity->sched_list = NULL;
@@ -605,9 +605,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
605605
struct drm_sched_rq *rq;
606606

607607
/* Add the entity to the run queue */
608-
spin_lock(&entity->rq_lock);
608+
spin_lock(&entity->lock);
609609
if (entity->stopped) {
610-
spin_unlock(&entity->rq_lock);
610+
spin_unlock(&entity->lock);
611611

612612
DRM_ERROR("Trying to push to a killed entity\n");
613613
return;
@@ -621,7 +621,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
621621
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
622622
drm_sched_rq_update_fifo_locked(entity, submit_ts);
623623

624-
spin_unlock(&entity->rq_lock);
624+
spin_unlock(&entity->lock);
625625

626626
drm_sched_wakeup(sched);
627627
}

drivers/gpu/drm/scheduler/sched_main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -170,7 +170,7 @@ void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, ktime_t ts
170170
* for entity from within concurrent drm_sched_entity_select_rq and the
171171
* other to update the rb tree structure.
172172
*/
173-
lockdep_assert_held(&entity->rq_lock);
173+
lockdep_assert_held(&entity->lock);
174174

175175
spin_lock(&entity->rq->lock);
176176

include/drm/gpu_scheduler.h

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -96,14 +96,22 @@ struct drm_sched_entity {
9696
*/
9797
struct list_head list;
9898

99+
/**
100+
* @lock:
101+
*
102+
* Lock protecting the run-queue (@rq) to which this entity belongs,
103+
* @priority and the list of schedulers (@sched_list, @num_sched_list).
104+
*/
105+
spinlock_t lock;
106+
99107
/**
100108
* @rq:
101109
*
102110
* Runqueue on which this entity is currently scheduled.
103111
*
104112
* FIXME: Locking is very unclear for this. Writers are protected by
105-
* @rq_lock, but readers are generally lockless and seem to just race
106-
* with not even a READ_ONCE.
113+
* @lock, but readers are generally lockless and seem to just race with
114+
* not even a READ_ONCE.
107115
*/
108116
struct drm_sched_rq *rq;
109117

@@ -136,17 +144,10 @@ struct drm_sched_entity {
136144
* @priority:
137145
*
138146
* Priority of the entity. This can be modified by calling
139-
* drm_sched_entity_set_priority(). Protected by &rq_lock.
147+
* drm_sched_entity_set_priority(). Protected by @lock.
140148
*/
141149
enum drm_sched_priority priority;
142150

143-
/**
144-
* @rq_lock:
145-
*
146-
* Lock to modify the runqueue to which this entity belongs.
147-
*/
148-
spinlock_t rq_lock;
149-
150151
/**
151152
* @job_queue: the list of jobs of this entity.
152153
*/

0 commit comments

Comments (0)