Commit ebe4354 (parent 7f3b16f)

drm/amdgpu: switch job hw_fence to amdgpu_fence

Use the amdgpu fence container so we can store additional data in the fence.
This also fixes the start_time handling for MCBP, since we were casting the
fence to an amdgpu_fence when it wasn't one.

Fixes: 3f4c175 ("drm/amdgpu: MCBP based on DRM scheduler (v9)")
Reviewed-by: Christian König <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
(cherry picked from commit bf1cd14)
Cc: [email protected]
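
Why the start_time fix matters: job->hw_fence used to be a bare struct dma_fence, so code that downcast a job's fence to struct amdgpu_fence (for example to read start_timestamp for MCBP bookkeeping) was interpreting memory that was never an amdgpu_fence. Below is a minimal standalone sketch of the container pattern the commit switches to, with simplified stand-in types; it is illustrative only, not the driver's code.

/*
 * Illustrative sketch only -- simplified stand-ins for the real driver
 * structs. It shows the embedding this commit relies on: dma_fence as
 * .base inside amdgpu_fence, and amdgpu_fence as .hw_fence inside
 * amdgpu_job, so a dma_fence pointer can be downcast safely.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_fence { int seqno; };	/* stand-in for the real struct */
struct amdgpu_ring;			/* opaque here */

struct amdgpu_fence {
	struct dma_fence base;
	struct amdgpu_ring *ring;
	long long start_timestamp;	/* ktime_t in the driver */
};

struct amdgpu_job {
	struct amdgpu_fence hw_fence;	/* was: struct dma_fence hw_fence; */
};

/*
 * Before the fix, the job-embedded fence was a bare dma_fence, so this
 * downcast produced a bogus amdgpu_fence and start_timestamp was garbage.
 * Now the fence handed out for a job is &job->hw_fence.base, so the
 * downcast lands on real amdgpu_fence storage.
 */
static long long read_start_timestamp(struct dma_fence *f)
{
	struct amdgpu_fence *af = container_of(f, struct amdgpu_fence, base);

	return af->start_timestamp;
}

int main(void)
{
	struct amdgpu_job job = { .hw_fence.start_timestamp = 42 };

	/* the pointer the rest of the driver sees is the inner dma_fence */
	return read_start_timestamp(&job.hw_fence.base) == 42 ? 0 : 1;
}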

6 files changed, 32 insertions(+), 32 deletions(-)

drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

Lines changed: 1 addition & 1 deletion

@@ -1902,7 +1902,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 			continue;
 		}
 		job = to_amdgpu_job(s_job);
-		if (preempted && (&job->hw_fence) == fence)
+		if (preempted && (&job->hw_fence.base) == fence)
 			/* mark the job as preempted */
 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
 	}

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

Lines changed: 1 addition & 1 deletion

@@ -6362,7 +6362,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	 *
 	 * job->base holds a reference to parent fence
 	 */
-	if (job && dma_fence_is_signaled(&job->hw_fence)) {
+	if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
 		job_signaled = true;
 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
 		goto skip_hw_reset;

drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

Lines changed: 7 additions & 23 deletions

@@ -41,22 +41,6 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_reset.h"
 
-/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
-	struct dma_fence base;
-
-	/* RB, DMA, etc. */
-	struct amdgpu_ring *ring;
-	ktime_t start_timestamp;
-};
-
 static struct kmem_cache *amdgpu_fence_slab;
 
 int amdgpu_fence_slab_init(void)
@@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
 		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
 		if (am_fence == NULL)
 			return -ENOMEM;
-		fence = &am_fence->base;
-		am_fence->ring = ring;
 	} else {
 		/* take use of job-embedded fence */
-		fence = &job->hw_fence;
+		am_fence = &job->hw_fence;
 	}
+	fence = &am_fence->base;
+	am_fence->ring = ring;
 
 	seq = ++ring->fence_drv.sync_seq;
 	if (job && job->job_run_counter) {
@@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
 			 * it right here or we won't be able to track them in fence_drv
 			 * and they will remain unsignaled during sa_bo free.
 			 */
-			job = container_of(old, struct amdgpu_job, hw_fence);
+			job = container_of(old, struct amdgpu_job, hw_fence.base);
 			if (!job->base.s_fence && !dma_fence_is_signaled(old))
 				dma_fence_signal(old);
 			RCU_INIT_POINTER(*ptr, NULL);
@@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 
 static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
 {
-	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
 
 	return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
@@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
  */
 static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
 {
-	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
 
 	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
 	/* free job if fence has a parent job */
-	kfree(container_of(f, struct amdgpu_job, hw_fence));
+	kfree(container_of(f, struct amdgpu_job, hw_fence.base));
 }
 
 /**
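
The container_of() changes in the hunks above all follow the same pattern: the pointer stored in fence_drv is the dma_fence embedded one level deeper, so the member designator becomes hw_fence.base instead of hw_fence. A small standalone check of that two-level container_of, using simplified stand-in structs rather than the driver's definitions:

/* Stand-in structs; only the member layout matters for the offsetof math. */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_fence { int seqno; };

struct amdgpu_fence {
	struct dma_fence base;
	long long start_timestamp;
};

struct amdgpu_job {
	int preemption_status;
	struct amdgpu_fence hw_fence;	/* embedded by value, not a pointer */
};

int main(void)
{
	struct amdgpu_job job = { 0 };
	struct dma_fence *f = &job.hw_fence.base;	/* what fence_drv stores */

	/* member designator must reach the dma_fence: hw_fence.base */
	struct amdgpu_job *back = container_of(f, struct amdgpu_job, hw_fence.base);

	assert(back == &job);
	return 0;
}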

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

Lines changed: 6 additions & 6 deletions

@@ -272,8 +272,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 	/* Check if any fences where initialized */
 	if (job->base.s_fence && job->base.s_fence->finished.ops)
 		f = &job->base.s_fence->finished;
-	else if (job->hw_fence.ops)
-		f = &job->hw_fence;
+	else if (job->hw_fence.base.ops)
+		f = &job->hw_fence.base;
 	else
 		f = NULL;
 
@@ -290,10 +290,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	amdgpu_sync_free(&job->explicit_sync);
 
 	/* only put the hw fence if has embedded fence */
-	if (!job->hw_fence.ops)
+	if (!job->hw_fence.base.ops)
 		kfree(job);
 	else
-		dma_fence_put(&job->hw_fence);
+		dma_fence_put(&job->hw_fence.base);
 }
 
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -322,10 +322,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	if (job->gang_submit != &job->base.s_fence->scheduled)
 		dma_fence_put(job->gang_submit);
 
-	if (!job->hw_fence.ops)
+	if (!job->hw_fence.base.ops)
 		kfree(job);
 	else
-		dma_fence_put(&job->hw_fence);
+		dma_fence_put(&job->hw_fence.base);
 }
 
 struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)

drivers/gpu/drm/amd/amdgpu/amdgpu_job.h

Lines changed: 1 addition & 1 deletion

@@ -48,7 +48,7 @@ struct amdgpu_job {
 	struct drm_sched_job base;
 	struct amdgpu_vm *vm;
 	struct amdgpu_sync explicit_sync;
-	struct dma_fence hw_fence;
+	struct amdgpu_fence hw_fence;
 	struct dma_fence *gang_submit;
 	uint32_t preamble_status;
 	uint32_t preemption_status;

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

Lines changed: 16 additions & 0 deletions

@@ -127,6 +127,22 @@ struct amdgpu_fence_driver {
 	struct dma_fence **fences;
 };
 
+/*
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+	struct dma_fence base;
+
+	/* RB, DMA, etc. */
+	struct amdgpu_ring *ring;
+	ktime_t start_timestamp;
+};
+
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
 
 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
