Commit d22faf7

mairacanal authored and pelwell committed
drm/v3d: Store the active job inside the queue's state
Instead of storing the queue's active job in four different variables, store the active job inside the queue's state. This way, it's possible to access all active jobs using an index based on `enum v3d_queue`.

Signed-off-by: Maíra Canal <[email protected]>
1 parent 7a08d3e commit d22faf7
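
For readers skimming the diff, here is a minimal, self-contained sketch of the layout the commit message describes. It uses mock types and a toy main() rather than the real driver structs, so it only illustrates the idea: one active_job slot per queue, indexed by enum v3d_queue, instead of four dedicated pointers.

/* Minimal standalone sketch -- mock types, not the real kernel structs. */
#include <stdio.h>

enum v3d_queue { V3D_BIN, V3D_RENDER, V3D_TFU, V3D_CSD, V3D_MAX_QUEUES };

struct v3d_job { const char *name; };

struct v3d_queue_state {
	struct v3d_job *active_job;	/* NULL while the queue is idle */
};

struct v3d_dev {
	struct v3d_queue_state queue[V3D_MAX_QUEUES];
};

int main(void)
{
	struct v3d_job bin_job = { "bin" };
	struct v3d_dev v3d = { 0 };
	enum v3d_queue q;

	/* Marking a job active is a single indexed assignment per queue. */
	v3d.queue[V3D_BIN].active_job = &bin_job;

	/* Checks over all queues collapse into a loop, mirroring the
	 * WARN_ON() loop that v3d_gem_destroy() gains in the diff below.
	 */
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		if (v3d.queue[q].active_job)
			printf("queue %d: active job %s\n",
			       (int)q, v3d.queue[q].active_job->name);

	return 0;
}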

File tree

4 files changed: +38, -55 lines

drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/v3d/v3d_sched.c

drivers/gpu/drm/v3d/v3d_drv.h

Lines changed: 3 additions & 5 deletions

@@ -58,6 +58,9 @@ struct v3d_queue_state {
 
 	/* Stores the GPU stats for this queue in the global context. */
 	struct v3d_stats stats;
+
+	/* Currently active job for this queue */
+	struct v3d_job *active_job;
 };
 
 /* Performance monitor object. The perfmon lifetime is controlled by userspace
@@ -165,11 +168,6 @@ struct v3d_dev {
 
 	struct work_struct overflow_mem_work;
 
-	struct v3d_bin_job *bin_job;
-	struct v3d_render_job *render_job;
-	struct v3d_tfu_job *tfu_job;
-	struct v3d_csd_job *csd_job;
-
 	struct v3d_queue_state queue[V3D_MAX_QUEUES];
 
 	/* Spinlock used to synchronize the overflow memory

drivers/gpu/drm/v3d/v3d_gem.c

Lines changed: 3 additions & 4 deletions

@@ -327,17 +327,16 @@ void
 v3d_gem_destroy(struct drm_device *dev)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
+	enum v3d_queue q;
 
 	v3d_sched_fini(v3d);
 	v3d_gemfs_fini(v3d);
 
 	/* Waiting for jobs to finish would need to be done before
 	 * unregistering V3D.
 	 */
-	WARN_ON(v3d->bin_job);
-	WARN_ON(v3d->render_job);
-	WARN_ON(v3d->tfu_job);
-	WARN_ON(v3d->csd_job);
+	for (q = 0; q < V3D_MAX_QUEUES; q++)
+		WARN_ON(v3d->queue[q].active_job);
 
 	drm_mm_takedown(&v3d->mm);
 

drivers/gpu/drm/v3d/v3d_irq.c

Lines changed: 24 additions & 38 deletions

@@ -42,6 +42,8 @@ v3d_overflow_mem_work(struct work_struct *work)
 		container_of(work, struct v3d_dev, overflow_mem_work);
 	struct drm_device *dev = &v3d->drm;
 	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+	struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
+	struct v3d_bin_job *bin_job;
 	struct drm_gem_object *obj;
 	unsigned long irqflags;
 
@@ -61,13 +63,15 @@ v3d_overflow_mem_work(struct work_struct *work)
 	 * some binner pool anyway.
 	 */
 	spin_lock_irqsave(&v3d->job_lock, irqflags);
-	if (!v3d->bin_job) {
+	bin_job = (struct v3d_bin_job *)queue->active_job;
+
+	if (!bin_job) {
 		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
 		goto out;
 	}
 
 	drm_gem_object_get(obj);
-	list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
+	list_add_tail(&bo->unref_head, &bin_job->render->unref_list);
 	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
 
 	v3d_mmu_flush_all(v3d);
@@ -79,6 +83,20 @@ v3d_overflow_mem_work(struct work_struct *work)
 	drm_gem_object_put(obj);
 }
 
+static void
+v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q,
+		     void (*trace_irq)(struct drm_device *, uint64_t))
+{
+	struct v3d_queue_state *queue = &v3d->queue[q];
+	struct v3d_fence *fence = to_v3d_fence(queue->active_job->irq_fence);
+
+	v3d_job_update_stats(queue->active_job, q);
+	trace_irq(&v3d->drm, fence->seqno);
+
+	queue->active_job = NULL;
+	dma_fence_signal(&fence->base);
+}
+
 static irqreturn_t
 v3d_irq(int irq, void *arg)
 {
@@ -102,41 +120,17 @@ v3d_irq(int irq, void *arg)
 	}
 
 	if (intsts & V3D_INT_FLDONE) {
-		struct v3d_fence *fence =
-			to_v3d_fence(v3d->bin_job->base.irq_fence);
-
-		v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
-		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
-
-		v3d->bin_job = NULL;
-		dma_fence_signal(&fence->base);
-
+		v3d_irq_signal_fence(v3d, V3D_BIN, trace_v3d_bcl_irq);
 		status = IRQ_HANDLED;
 	}
 
 	if (intsts & V3D_INT_FRDONE) {
-		struct v3d_fence *fence =
-			to_v3d_fence(v3d->render_job->base.irq_fence);
-
-		v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
-		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
-
-		v3d->render_job = NULL;
-		dma_fence_signal(&fence->base);
-
+		v3d_irq_signal_fence(v3d, V3D_RENDER, trace_v3d_rcl_irq);
 		status = IRQ_HANDLED;
 	}
 
 	if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
-		struct v3d_fence *fence =
-			to_v3d_fence(v3d->csd_job->base.irq_fence);
-
-		v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
-		trace_v3d_csd_irq(&v3d->drm, fence->seqno);
-
-		v3d->csd_job = NULL;
-		dma_fence_signal(&fence->base);
-
+		v3d_irq_signal_fence(v3d, V3D_CSD, trace_v3d_csd_irq);
 		status = IRQ_HANDLED;
 	}
 
@@ -168,15 +162,7 @@ v3d_hub_irq(int irq, void *arg)
 	V3D_WRITE(V3D_HUB_INT_CLR, intsts);
 
 	if (intsts & V3D_HUB_INT_TFUC) {
-		struct v3d_fence *fence =
-			to_v3d_fence(v3d->tfu_job->base.irq_fence);
-
-		v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
-		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
-
-		v3d->tfu_job = NULL;
-		dma_fence_signal(&fence->base);
-
+		v3d_irq_signal_fence(v3d, V3D_TFU, trace_v3d_tfu_irq);
 		status = IRQ_HANDLED;
 	}
 
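The v3d_irq_signal_fence() helper added above is where the unified active_job slot pays off: the four IRQ blocks differed only in the queue index and the tracepoint they call, so they can share one function parameterized by both. The following standalone sketch shows that pattern with mock types and placeholder callbacks (trace_bcl() and trace_rcl() are illustrative names, not the driver's real tracepoints).

/* Standalone illustration -- mock types and hypothetical callbacks. */
#include <stdio.h>

enum v3d_queue { V3D_BIN, V3D_RENDER, V3D_TFU, V3D_CSD, V3D_MAX_QUEUES };

struct v3d_job { unsigned long long seqno; };

struct v3d_queue_state { struct v3d_job *active_job; };

struct v3d_dev { struct v3d_queue_state queue[V3D_MAX_QUEUES]; };

/* Placeholder tracepoints; the driver uses trace_v3d_bcl_irq() and friends. */
static void trace_bcl(unsigned long long seqno)
{
	printf("bcl done, seqno %llu\n", seqno);
}

static void trace_rcl(unsigned long long seqno)
{
	printf("rcl done, seqno %llu\n", seqno);
}

/* One helper replaces four near-identical IRQ blocks: look up the queue's
 * active job, emit the queue-specific trace, then clear the slot.
 */
static void signal_queue(struct v3d_dev *v3d, enum v3d_queue q,
			 void (*trace_irq)(unsigned long long))
{
	struct v3d_queue_state *queue = &v3d->queue[q];

	trace_irq(queue->active_job->seqno);
	queue->active_job = NULL;
}

int main(void)
{
	struct v3d_job bin = { 1 }, render = { 2 };
	struct v3d_dev v3d = { 0 };

	v3d.queue[V3D_BIN].active_job = &bin;
	v3d.queue[V3D_RENDER].active_job = &render;

	signal_queue(&v3d, V3D_BIN, trace_bcl);
	signal_queue(&v3d, V3D_RENDER, trace_rcl);
	return 0;
}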

drivers/gpu/drm/v3d/v3d_sched.c

Lines changed: 8 additions & 8 deletions

@@ -232,7 +232,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 
 	if (unlikely(job->base.base.s_fence->finished.error)) {
 		spin_lock_irqsave(&v3d->job_lock, irqflags);
-		v3d->bin_job = NULL;
+		v3d->queue[V3D_BIN].active_job = NULL;
 		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
 		return NULL;
 	}
@@ -241,7 +241,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 	 * v3d_overflow_mem_work().
 	 */
 	spin_lock_irqsave(&v3d->job_lock, irqflags);
-	v3d->bin_job = job;
+	v3d->queue[V3D_BIN].active_job = &job->base;
 	/* Clear out the overflow allocation, so we don't
 	 * reuse the overflow attached to a previous job.
 	 */
@@ -290,11 +290,11 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
 	struct dma_fence *fence;
 
 	if (unlikely(job->base.base.s_fence->finished.error)) {
-		v3d->render_job = NULL;
+		v3d->queue[V3D_RENDER].active_job = NULL;
 		return NULL;
 	}
 
-	v3d->render_job = job;
+	v3d->queue[V3D_RENDER].active_job = &job->base;
 
 	/* Can we avoid this flush? We need to be careful of
 	 * scheduling, though -- imagine job0 rendering to texture and
@@ -338,11 +338,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
 	struct dma_fence *fence;
 
 	if (unlikely(job->base.base.s_fence->finished.error)) {
-		v3d->tfu_job = NULL;
+		v3d->queue[V3D_TFU].active_job = NULL;
 		return NULL;
 	}
 
-	v3d->tfu_job = job;
+	v3d->queue[V3D_TFU].active_job = &job->base;
 
 	fence = v3d_fence_create(v3d, V3D_TFU);
 	if (IS_ERR(fence))
@@ -386,11 +386,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
 	int i, csd_cfg0_reg;
 
 	if (unlikely(job->base.base.s_fence->finished.error)) {
-		v3d->csd_job = NULL;
+		v3d->queue[V3D_CSD].active_job = NULL;
 		return NULL;
 	}
 
-	v3d->csd_job = job;
+	v3d->queue[V3D_CSD].active_job = &job->base;
 
 	v3d_invalidate_caches(v3d);
 