
Commit 1d8a5ca

drm/msm: Conversion to drm scheduler
For existing adrenos, there are one or more ringbuffers, depending on
whether preemption is supported. When preemption is supported, each
ringbuffer has its own priority. A submitqueue (which maps to a gl
context or vk queue in userspace) is mapped to a specific ringbuffer
at creation time, based on the submitqueue's priority.

Each ringbuffer has its own drm_gpu_scheduler. Each submitqueue maps
to a drm_sched_entity. And each submit maps to a drm_sched_job.

Closes: https://gitlab.freedesktop.org/drm/msm/-/issues/4
Signed-off-by: Rob Clark <[email protected]>
Acked-by: Christian König <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Rob Clark <[email protected]>
1 parent 79341eb commit 1d8a5ca
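
The object mapping described in the commit message can be sketched as
follows. This is a minimal, illustrative sketch assuming the v5.15-era
drm_sched_init()/drm_sched_entity_init() signatures; the example_*
helpers, msm_sched_ops, and the queue-depth/timeout values are
assumptions, not the literal driver code.

/* Hedged sketch: one drm_gpu_scheduler per ringbuffer, one
 * drm_sched_entity per submitqueue.
 */
#include <drm/gpu_scheduler.h>

static const struct drm_sched_backend_ops msm_sched_ops;	/* assumed */

static int example_ring_init(struct msm_ringbuffer *ring)
{
	/* One scheduler per ring: jobs on this scheduler execute FIFO
	 * on this ring once their dependencies have signaled.
	 */
	return drm_sched_init(&ring->sched, &msm_sched_ops,
			32,			/* hw_submission depth (assumed) */
			0,			/* hang_limit */
			msecs_to_jiffies(500),	/* job timeout (assumed) */
			NULL, NULL,		/* timeout wq, score */
			"gpu-ring");
}

static int example_queue_init(struct msm_gpu_submitqueue *queue,
			      struct msm_ringbuffer *ring)
{
	struct drm_gpu_scheduler *sched = &ring->sched;

	/* The submitqueue's priority selected the ring at queue
	 * creation; bind the queue's entity to that ring's scheduler.
	 */
	return drm_sched_entity_init(&queue->entity,
			DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL);
}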

File tree

10 files changed (+214, -131 lines)


drivers/gpu/drm/msm/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -14,6 +14,7 @@ config DRM_MSM
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
+	select DRM_SCHED
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM if ARCH_QCOM

drivers/gpu/drm/msm/msm_gem.c

Lines changed: 0 additions & 35 deletions

@@ -804,41 +804,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
-/* must be called before _move_to_active().. */
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive)
-{
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence;
-	int i, ret;
-
-	fobj = dma_resv_shared_list(obj->resv);
-	if (!fobj || (fobj->shared_count == 0)) {
-		fence = dma_resv_excl_fence(obj->resv);
-		/* don't need to wait on our own fences, since ring is fifo */
-		if (fence && (fence->context != fctx->context)) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (!exclusive || !fobj)
-		return 0;
-
-	for (i = 0; i < fobj->shared_count; i++) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-					dma_resv_held(obj->resv));
-		if (fence->context != fctx->context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
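
The deleted msm_gem_sync_object() blocked with dma_fence_wait() at
submit time. With drm_sched, those implicit-sync fences are instead
stashed in submit->deps (see the msm_gem.h diff below) and handed to
the scheduler, which waits on them asynchronously. A hedged sketch of
how the deps xarray might be drained via the .dependency hook of
drm_sched_backend_ops (which the v5.15 drm_sched core polls until it
returns NULL); the function name and placement are assumptions:

static struct dma_fence *example_job_dependency(struct drm_sched_job *job,
		struct drm_sched_entity *s_entity)
{
	struct msm_gem_submit *submit = to_msm_submit(job);

	/* Pop the next dependency; the scheduler will not call
	 * run_job() until every fence returned here has signaled.
	 */
	if (!xa_empty(&submit->deps))
		return xa_erase(&submit->deps, submit->last_dep++);

	return NULL;
}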

drivers/gpu/drm/msm/msm_gem.h

Lines changed: 23 additions & 3 deletions

@@ -9,6 +9,7 @@
 
 #include <linux/kref.h>
 #include <linux/dma-resv.h>
+#include "drm/gpu_scheduler.h"
 #include "msm_drv.h"
 
 /* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
@@ -143,8 +144,6 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
 void msm_gem_active_put(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
@@ -311,6 +310,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
  * make it easier to unwind when things go wrong, etc).
  */
 struct msm_gem_submit {
+	struct drm_sched_job base;
 	struct kref ref;
 	struct drm_device *dev;
 	struct msm_gpu *gpu;
@@ -319,7 +319,22 @@ struct msm_gem_submit {
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
 	uint32_t seqno;		/* Sequence number of the submit on the ring */
-	struct dma_fence *fence;
+
+	/* Array of struct dma_fence * to block on before submitting this job.
+	 */
+	struct xarray deps;
+	unsigned long last_dep;
+
+	/* Hw fence, which is created when the scheduler executes the job, and
+	 * is signaled when the hw finishes (via seqno write from cmdstream)
+	 */
+	struct dma_fence *hw_fence;
+
+	/* Userspace visible fence, which is signaled by the scheduler after
+	 * the hw_fence is signaled.
+	 */
+	struct dma_fence *user_fence;
+
 	int fence_id;		/* key into queue->fence_idr */
 	struct msm_gpu_submitqueue *queue;
 	struct pid *pid;	/* submitting process */
@@ -350,6 +365,11 @@ struct msm_gem_submit {
 	} bos[];
 };
 
+static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
+{
+	return container_of(job, struct msm_gem_submit, base);
+}
+
 void __msm_gem_submit_destroy(struct kref *kref);
 
 static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
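
The two new fence fields split what used to be a single submit->fence.
A hedged sketch of the run_job step that ties them together, assuming
the run_job() hook of drm_sched_backend_ops; the example_* name and
the msm_fence_alloc()/msm_gpu_submit() usage are illustrative:

/* The scheduler calls run_job() once all deps have signaled.  The
 * driver creates the hw fence, queues the cmdstream, and returns the
 * hw fence; when it signals, the scheduler signals the job's finished
 * fence, which backs the userspace-visible user_fence.
 */
static struct dma_fence *example_run_job(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);

	/* hw fence signals via the seqno write at the end of the
	 * cmdstream (see the hw_fence comment in the struct above)
	 */
	submit->hw_fence = msm_fence_alloc(submit->ring->fctx);

	msm_gpu_submit(submit->gpu, submit);

	return dma_fence_get(submit->hw_fence);
}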
