Skip to content

Commit 8497376

Browse files
ickle authored and jlahtine-intel committed
drm/i915/gt: Incorporate the virtual engine into timeslicing
It was quite the oversight to only factor in the normal queue to decide the timeslicing switch priority. By leaving out the next virtual request from the priority decision, we would not timeslice the current engine if there was an available virtual request.

Testcase: igt/gem_exec_balancer/sliced
Fixes: 3df2dee ("drm/i915/execlists: Enable timeslice on partial virtual engine dequeue")
Signed-off-by: Chris Wilson <[email protected]>
Cc: Mika Kuoppala <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Reviewed-by: Tvrtko Ursulin <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
(cherry picked from commit 6ad249b)
Signed-off-by: Joonas Lahtinen <[email protected]>
1 parent b3a9e3b commit 8497376

File tree

1 file changed

+24
-6
lines changed

1 file changed

+24
-6
lines changed

drivers/gpu/drm/i915/gt/intel_lrc.c

Lines changed: 24 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1895,14 +1895,33 @@ static void defer_active(struct intel_engine_cs *engine)
18951895

18961896
static bool
18971897
need_timeslice(const struct intel_engine_cs *engine,
1898-
const struct i915_request *rq)
1898+
const struct i915_request *rq,
1899+
const struct rb_node *rb)
18991900
{
19001901
int hint;
19011902

19021903
if (!intel_engine_has_timeslices(engine))
19031904
return false;
19041905

19051906
hint = engine->execlists.queue_priority_hint;
1907+
1908+
if (rb) {
1909+
const struct virtual_engine *ve =
1910+
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
1911+
const struct intel_engine_cs *inflight =
1912+
intel_context_inflight(&ve->context);
1913+
1914+
if (!inflight || inflight == engine) {
1915+
struct i915_request *next;
1916+
1917+
rcu_read_lock();
1918+
next = READ_ONCE(ve->request);
1919+
if (next)
1920+
hint = max(hint, rq_prio(next));
1921+
rcu_read_unlock();
1922+
}
1923+
}
1924+
19061925
if (!list_is_last(&rq->sched.link, &engine->active.requests))
19071926
hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
19081927

@@ -1977,10 +1996,9 @@ static void set_timeslice(struct intel_engine_cs *engine)
19771996
set_timer_ms(&engine->execlists.timer, duration);
19781997
}
19791998

1980-
static void start_timeslice(struct intel_engine_cs *engine)
1999+
static void start_timeslice(struct intel_engine_cs *engine, int prio)
19812000
{
19822001
struct intel_engine_execlists *execlists = &engine->execlists;
1983-
const int prio = queue_prio(execlists);
19842002
unsigned long duration;
19852003

19862004
if (!intel_engine_has_timeslices(engine))
@@ -2140,7 +2158,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
21402158
__unwind_incomplete_requests(engine);
21412159

21422160
last = NULL;
2143-
} else if (need_timeslice(engine, last) &&
2161+
} else if (need_timeslice(engine, last, rb) &&
21442162
timeslice_expired(execlists, last)) {
21452163
if (i915_request_completed(last)) {
21462164
tasklet_hi_schedule(&execlists->tasklet);
@@ -2188,7 +2206,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
21882206
* Even if ELSP[1] is occupied and not worthy
21892207
* of timeslices, our queue might be.
21902208
*/
2191-
start_timeslice(engine);
2209+
start_timeslice(engine, queue_prio(execlists));
21922210
return;
21932211
}
21942212
}
@@ -2223,7 +2241,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
22232241

22242242
if (last && !can_merge_rq(last, rq)) {
22252243
spin_unlock(&ve->base.active.lock);
2226-
start_timeslice(engine);
2244+
start_timeslice(engine, rq_prio(rq));
22272245
return; /* leave this for another sibling */
22282246
}
22292247

0 commit comments

Comments
 (0)