Skip to content

Commit e683949

Browse files
committed
sched_ext: Make find_dsq_for_dispatch() handle SCX_DSQ_LOCAL_ON
find_dsq_for_dispatch() handles all DSQ IDs except SCX_DSQ_LOCAL_ON. Instead, each caller is handling SCX_DSQ_LOCAL_ON before calling it. Move SCX_DSQ_LOCAL_ON lookup into find_dsq_for_dispatch() to remove duplicate code in direct_dispatch() and dispatch_to_local_dsq(). No functional changes intended. Signed-off-by: Tejun Heo <[email protected]> Acked-by: David Vernet <[email protected]>
1 parent 4d3ca89 commit e683949

File tree

1 file changed

+40
-50
lines changed

1 file changed

+40
-50
lines changed

kernel/sched/ext.c

Lines changed: 40 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -1804,6 +1804,15 @@ static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
18041804
if (dsq_id == SCX_DSQ_LOCAL)
18051805
return &rq->scx.local_dsq;
18061806

1807+
if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1808+
s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1809+
1810+
if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1811+
return &scx_dsq_global;
1812+
1813+
return &cpu_rq(cpu)->scx.local_dsq;
1814+
}
1815+
18071816
dsq = find_non_local_dsq(dsq_id);
18081817
if (unlikely(!dsq)) {
18091818
scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
@@ -1847,8 +1856,8 @@ static void mark_direct_dispatch(struct task_struct *ddsp_task,
18471856
static void direct_dispatch(struct task_struct *p, u64 enq_flags)
18481857
{
18491858
struct rq *rq = task_rq(p);
1850-
struct scx_dispatch_q *dsq;
1851-
u64 dsq_id = p->scx.ddsp_dsq_id;
1859+
struct scx_dispatch_q *dsq =
1860+
find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
18521861

18531862
touch_core_sched_dispatch(rq, p);
18541863

@@ -1860,15 +1869,9 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
18601869
* DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
18611870
* the enqueue so that it's executed when @rq can be unlocked.
18621871
*/
1863-
if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1864-
s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1872+
if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
18651873
unsigned long opss;
18661874

1867-
if (cpu == cpu_of(rq)) {
1868-
dsq_id = SCX_DSQ_LOCAL;
1869-
goto dispatch;
1870-
}
1871-
18721875
opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
18731876

18741877
switch (opss & SCX_OPSS_STATE_MASK) {
@@ -1895,8 +1898,6 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
18951898
return;
18961899
}
18971900

1898-
dispatch:
1899-
dsq = find_dsq_for_dispatch(rq, dsq_id, p);
19001901
dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
19011902
}
19021903

@@ -2372,51 +2373,38 @@ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
23722373
enum dispatch_to_local_dsq_ret {
23732374
DTL_DISPATCHED, /* successfully dispatched */
23742375
DTL_LOST, /* lost race to dequeue */
2375-
DTL_NOT_LOCAL, /* destination is not a local DSQ */
23762376
DTL_INVALID, /* invalid local dsq_id */
23772377
};
23782378

23792379
/**
23802380
* dispatch_to_local_dsq - Dispatch a task to a local dsq
23812381
* @rq: current rq which is locked
2382-
* @dsq_id: destination dsq ID
2382+
* @dst_dsq: destination DSQ
23832383
* @p: task to dispatch
23842384
* @enq_flags: %SCX_ENQ_*
23852385
*
2386-
* We're holding @rq lock and want to dispatch @p to the local DSQ identified by
2387-
* @dsq_id. This function performs all the synchronization dancing needed
2388-
* because local DSQs are protected with rq locks.
2386+
* We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2387+
* DSQ. This function performs all the synchronization dancing needed because
2388+
* local DSQs are protected with rq locks.
23892389
*
23902390
* The caller must have exclusive ownership of @p (e.g. through
23912391
* %SCX_OPSS_DISPATCHING).
23922392
*/
23932393
static enum dispatch_to_local_dsq_ret
2394-
dispatch_to_local_dsq(struct rq *rq, u64 dsq_id, struct task_struct *p,
2395-
u64 enq_flags)
2394+
dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2395+
struct task_struct *p, u64 enq_flags)
23962396
{
23972397
struct rq *src_rq = task_rq(p);
2398-
struct rq *dst_rq;
2398+
struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
23992399

24002400
/*
24012401
* We're synchronized against dequeue through DISPATCHING. As @p can't
24022402
* be dequeued, its task_rq and cpus_allowed are stable too.
2403+
*
2404+
* If dispatching to @rq that @p is already on, no lock dancing needed.
24032405
*/
2404-
if (dsq_id == SCX_DSQ_LOCAL) {
2405-
dst_rq = rq;
2406-
} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
2407-
s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
2408-
2409-
if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2410-
return DTL_INVALID;
2411-
dst_rq = cpu_rq(cpu);
2412-
} else {
2413-
return DTL_NOT_LOCAL;
2414-
}
2415-
2416-
/* if dispatching to @rq that @p is already on, no lock dancing needed */
24172406
if (rq == src_rq && rq == dst_rq) {
2418-
dispatch_enqueue(&dst_rq->scx.local_dsq, p,
2419-
enq_flags | SCX_ENQ_CLEAR_OPSS);
2407+
dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
24202408
return DTL_DISPATCHED;
24212409
}
24222410

@@ -2558,19 +2546,21 @@ static void finish_dispatch(struct rq *rq, struct task_struct *p,
25582546

25592547
BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
25602548

2561-
switch (dispatch_to_local_dsq(rq, dsq_id, p, enq_flags)) {
2562-
case DTL_DISPATCHED:
2563-
break;
2564-
case DTL_LOST:
2565-
break;
2566-
case DTL_INVALID:
2567-
dsq_id = SCX_DSQ_GLOBAL;
2568-
fallthrough;
2569-
case DTL_NOT_LOCAL:
2570-
dsq = find_dsq_for_dispatch(cpu_rq(raw_smp_processor_id()),
2571-
dsq_id, p);
2549+
dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2550+
2551+
if (dsq->id == SCX_DSQ_LOCAL) {
2552+
switch (dispatch_to_local_dsq(rq, dsq, p, enq_flags)) {
2553+
case DTL_DISPATCHED:
2554+
break;
2555+
case DTL_LOST:
2556+
break;
2557+
case DTL_INVALID:
2558+
dispatch_enqueue(&scx_dsq_global, p,
2559+
enq_flags | SCX_ENQ_CLEAR_OPSS);
2560+
break;
2561+
}
2562+
} else {
25722563
dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2573-
break;
25742564
}
25752565
}
25762566

@@ -2747,13 +2737,13 @@ static void process_ddsp_deferred_locals(struct rq *rq)
27472737
*/
27482738
while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
27492739
struct task_struct, scx.dsq_list.node))) {
2750-
s32 ret;
2740+
struct scx_dispatch_q *dsq;
27512741

27522742
list_del_init(&p->scx.dsq_list.node);
27532743

2754-
ret = dispatch_to_local_dsq(rq, p->scx.ddsp_dsq_id, p,
2755-
p->scx.ddsp_enq_flags);
2756-
WARN_ON_ONCE(ret == DTL_NOT_LOCAL);
2744+
dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2745+
if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2746+
dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
27572747
}
27582748
}
27592749

0 commit comments

Comments
 (0)