
Commit 9e2750f

ickle authored and jnikula committed
drm/i915: Keep track of request among the scheduling lists
If we keep track of when the i915_request.sched.link is on the HW
runlist, or in the priority queue, we can simplify our interactions with
the request (such as during rescheduling). This also simplifies the next
patch, where we introduce a new in-between list for requests that are
ready but neither on the run list nor in the queue.

v2: Update the i915_sched_node.link explanation for current usage, where
it is a link on both the queue and on the runlists.

Signed-off-by: Chris Wilson <[email protected]>
Cc: Mika Kuoppala <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Reviewed-by: Tvrtko Ursulin <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
(cherry picked from commit 672c368)
Signed-off-by: Jani Nikula <[email protected]>
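The bookkeeping is small enough to model outside the kernel. Below is a
minimal standalone C sketch of the flag lifecycle the patch establishes:
the pqueue bit is set when a request enters the priority queue (see
queue_request() and __unwind_incomplete_requests() in the intel_lrc.c
hunks below) and cleared when it moves to the HW runlist (see
__i915_request_submit() in the i915_request.c hunk). The enum values,
struct, and function bodies here are illustrative stand-ins, and plain
flag arithmetic stands in for the kernel's atomic set_bit()/clear_bit()/
test_bit() helpers.

/*
 * Illustrative sketch only -- not the i915 code. Plain flags stand in
 * for the kernel's atomic bitops on rq->fence.flags.
 */
#include <stdbool.h>
#include <stdio.h>

enum rq_flags {
	RQ_FLAG_ACTIVE = 1 << 0, /* on the HW runlist (engine->active.requests) */
	RQ_FLAG_PQUEUE = 1 << 1, /* waiting in the priority queue */
};

struct request {
	unsigned int flags;
};

/* Mirrors queue_request(): ready for execution, so mark pqueue membership. */
static void queue_request(struct request *rq)
{
	rq->flags |= RQ_FLAG_PQUEUE;
}

/* Mirrors __i915_request_submit(): moved to the runlist, clear the pqueue bit. */
static void submit_request(struct request *rq)
{
	rq->flags |= RQ_FLAG_ACTIVE;
	rq->flags &= ~RQ_FLAG_PQUEUE;
}

/* Mirrors i915_request_in_priority_queue(): the cheap rescheduling check. */
static bool in_priority_queue(const struct request *rq)
{
	return rq->flags & RQ_FLAG_PQUEUE;
}

int main(void)
{
	struct request rq = { 0 };

	queue_request(&rq);
	printf("queued:    in pqueue? %d\n", in_priority_queue(&rq)); /* 1 */

	submit_request(&rq);
	printf("submitted: in pqueue? %d\n", in_priority_queue(&rq)); /* 0 */

	return 0;
}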
1 parent cc3251d · commit 9e2750f

File tree

4 files changed: +38 -18 lines


drivers/gpu/drm/i915/gt/intel_lrc.c

Lines changed: 8 additions & 5 deletions

@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
 			list_move(&rq->sched.link, pl);
+			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
 			active = rq;
 		} else {
 			struct intel_engine_cs *owner = rq->context->engine;
@@ -2431,11 +2433,12 @@ static void execlists_preempt(struct timer_list *timer)
 }
 
 static void queue_request(struct intel_engine_cs *engine,
-			  struct i915_sched_node *node,
-			  int prio)
+			  struct i915_request *rq)
 {
-	GEM_BUG_ON(!list_empty(&node->link));
-	list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+	GEM_BUG_ON(!list_empty(&rq->sched.link));
+	list_add_tail(&rq->sched.link,
+		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
 static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -2471,7 +2474,7 @@ static void execlists_submit_request(struct i915_request *request)
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	queue_request(engine, &request->sched, rq_prio(request));
+	queue_request(engine, request);
 
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));

drivers/gpu/drm/i915/i915_request.c

Lines changed: 3 additions & 1 deletion

@@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
 xfer:	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
 		list_move_tail(&request->sched.link, &engine->active.requests);
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+	}
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&

drivers/gpu/drm/i915/i915_request.h

Lines changed: 17 additions & 0 deletions

@@ -70,6 +70,18 @@ enum {
 	 */
 	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
 
+	/*
+	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+	 *
+	 * Using the scheduler, when a request is ready for execution it is put
+	 * into the priority queue, and removed from that queue when transferred
+	 * to the HW runlists. We want to track its membership within the
+	 * priority queue so that we can easily check before rescheduling.
+	 *
+	 * See i915_request_in_priority_queue()
+	 */
+	I915_FENCE_FLAG_PQUEUE,
+
 	/*
 	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
 	 *
@@ -361,6 +373,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
 	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 }
 
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
 /**
  * Returns true if seq1 is later than seq2.
  */
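With both flags in the header, a request's position can be read off
directly: ACTIVE means it is on the HW runlist, PQUEUE means it is
sitting in the priority queue, and neither means it is not yet ready. A
hedged illustration of that three-way classification follows; the struct
and the rq_where() helper are hypothetical stand-ins, and only the flag
semantics mirror the real header.

/* Illustrative only: stand-ins for the two flag tests in i915_request.h. */
#include <stdbool.h>
#include <stdio.h>

struct rq_state {
	bool active; /* i915_request_is_active(rq) */
	bool pqueue; /* i915_request_in_priority_queue(rq) */
};

/* Hypothetical helper: name where the request currently lives. */
static const char *rq_where(struct rq_state s)
{
	if (s.active)
		return "HW runlist (engine->active.requests)";
	if (s.pqueue)
		return "priority queue (execlists.queue)";
	return "not yet ready";
}

int main(void)
{
	struct rq_state queued  = { .active = false, .pqueue = true  };
	struct rq_state running = { .active = true,  .pqueue = false };

	printf("%s\n", rq_where(queued));  /* priority queue */
	printf("%s\n", rq_where(running)); /* HW runlist */
	return 0;
}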

drivers/gpu/drm/i915/i915_scheduler.c

Lines changed: 10 additions & 12 deletions

@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		node->attr.priority = prio;
 
-		if (list_empty(&node->link)) {
-			/*
-			 * If the request is not in the priolist queue because
-			 * it is not yet runnable, then it doesn't contribute
-			 * to our preemption decisions. On the other hand,
-			 * if the request is on the HW, it too is not in the
-			 * queue; but in that case we may still need to reorder
-			 * the inflight requests.
-			 */
+		/*
+		 * Once the request is ready, it will be placed into the
+		 * priority lists and then onto the HW runlist. Before the
+		 * request is ready, it does not contribute to our preemption
+		 * decisions and we can safely ignore it, as it will, and
+		 * any preemption required, be dealt with upon submission.
+		 * See engine->submit_request()
+		 */
+		if (list_empty(&node->link))
 			continue;
-		}
 
-		if (!intel_engine_is_virtual(engine) &&
-		    !i915_request_is_active(node_to_request(node))) {
+		if (i915_request_in_priority_queue(node_to_request(node))) {
 			if (!cache.priolist)
 				cache.priolist =
 					i915_sched_lookup_priolist(engine,