Commit 18f8569

sched_ext: Restructure dispatch_to_local_dsq()
Now that there's nothing left after the big if block, flip the if
condition and unindent the body. No functional changes intended.

v2: Add BUG() to clarify control can't reach the end of
    dispatch_to_local_dsq() in UP kernels per David.

Signed-off-by: Tejun Heo <[email protected]>
Acked-by: David Vernet <[email protected]>
1 parent 0aab263 commit 18f8569
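The change is the standard guard-clause refactor: handle the failure case
up front and return, so the common path drops an indentation level. Below
is a minimal standalone sketch of the shape, not the kernel code itself;
can_proceed(), do_work() and fallback() are hypothetical helpers, and
likely()/unlikely() are defined locally so the sketch compiles on its own.

#include <stdbool.h>
#include <stdio.h>

/* kernel-style branch hints, defined here so the sketch is self-contained */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static bool can_proceed(void) { return true; }		/* hypothetical */
static void do_work(void) { puts("main path"); }	/* hypothetical */
static void fallback(void) { puts("fallback"); }	/* hypothetical */

/* before: the common case lives inside a big if block */
static void dispatch_before(void)
{
	if (likely(can_proceed())) {
		do_work();	/* indented one level deeper than needed */
		return;
	}

	fallback();
}

/* after: flip the condition, bail out early, unindent the common case */
static void dispatch_after(void)
{
	if (unlikely(!can_proceed())) {
		fallback();
		return;
	}

	do_work();	/* common case now at function scope */
}

int main(void)
{
	dispatch_before();
	dispatch_after();
	return 0;
}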


kernel/sched/ext.c

Lines changed: 46 additions & 50 deletions
@@ -2402,65 +2402,61 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 	}
 
 #ifdef CONFIG_SMP
-	if (likely(task_can_run_on_remote_rq(p, dst_rq, true))) {
-		/*
-		 * @p is on a possibly remote @src_rq which we need to lock to
-		 * move the task. If dequeue is in progress, it'd be locking
-		 * @src_rq and waiting on DISPATCHING, so we can't grab @src_rq
-		 * lock while holding DISPATCHING.
-		 *
-		 * As DISPATCHING guarantees that @p is wholly ours, we can
-		 * pretend that we're moving from a DSQ and use the same
-		 * mechanism - mark the task under transfer with holding_cpu,
-		 * release DISPATCHING and then follow the same protocol. See
-		 * unlink_dsq_and_lock_src_rq().
-		 */
-		p->scx.holding_cpu = raw_smp_processor_id();
+	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+		dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+		return;
+	}
 
-		/* store_release ensures that dequeue sees the above */
-		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+	/*
+	 * @p is on a possibly remote @src_rq which we need to lock to move the
+	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
+	 * on DISPATCHING, so we can't grab @src_rq lock while holding
+	 * DISPATCHING.
+	 *
+	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
+	 * we're moving from a DSQ and use the same mechanism - mark the task
+	 * under transfer with holding_cpu, release DISPATCHING and then follow
+	 * the same protocol. See unlink_dsq_and_lock_src_rq().
+	 */
+	p->scx.holding_cpu = raw_smp_processor_id();
 
-		/* switch to @src_rq lock */
-		if (rq != src_rq) {
-			raw_spin_rq_unlock(rq);
-			raw_spin_rq_lock(src_rq);
-		}
+	/* store_release ensures that dequeue sees the above */
+	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
 
-		/* task_rq couldn't have changed if we're still the holding cpu */
-		if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
-		    !WARN_ON_ONCE(src_rq != task_rq(p))) {
-			/*
-			 * If @p is staying on the same rq, there's no need to
-			 * go through the full deactivate/activate cycle.
-			 * Optimize by abbreviating the operations in
-			 * move_task_to_local_dsq().
-			 */
-			if (src_rq == dst_rq) {
-				p->scx.holding_cpu = -1;
-				dispatch_enqueue(&dst_rq->scx.local_dsq,
-						 p, enq_flags);
-			} else {
-				move_task_to_local_dsq(p, enq_flags,
-						       src_rq, dst_rq);
-			}
+	/* switch to @src_rq lock */
+	if (rq != src_rq) {
+		raw_spin_rq_unlock(rq);
+		raw_spin_rq_lock(src_rq);
+	}
 
-			/* if the destination CPU is idle, wake it up */
-			if (sched_class_above(p->sched_class,
-					      dst_rq->curr->sched_class))
-				resched_curr(dst_rq);
+	/* task_rq couldn't have changed if we're still the holding cpu */
+	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
+	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
+		/*
+		 * If @p is staying on the same rq, there's no need to go
+		 * through the full deactivate/activate cycle. Optimize by
+		 * abbreviating the operations in move_task_to_local_dsq().
+		 */
+		if (src_rq == dst_rq) {
+			p->scx.holding_cpu = -1;
+			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
+		} else {
+			move_task_to_local_dsq(p, enq_flags, src_rq, dst_rq);
 		}
 
-		/* switch back to @rq lock */
-		if (rq != dst_rq) {
-			raw_spin_rq_unlock(dst_rq);
-			raw_spin_rq_lock(rq);
-		}
+		/* if the destination CPU is idle, wake it up */
+		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
+			resched_curr(dst_rq);
+	}
 
-		return;
+	/* switch back to @rq lock */
+	if (rq != dst_rq) {
+		raw_spin_rq_unlock(dst_rq);
+		raw_spin_rq_lock(rq);
 	}
+#else	/* CONFIG_SMP */
+	BUG();	/* control can not reach here on UP */
 #endif	/* CONFIG_SMP */
-
-	dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 /**
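The comment block in the hunk describes the handoff that keeps this path
safe: a release store publishes holding_cpu before DISPATCHING is dropped,
and after relocking the source runqueue the dispatcher confirms it still
owns the transfer. A loose C11 sketch of that pattern follows; struct task,
start_transfer() and still_holding() are hypothetical stand-ins, and the
kernel itself uses atomic_long_set_release() and rq locks, not these
helpers.

#include <stdatomic.h>
#include <stdbool.h>

/* hypothetical stand-ins for the scx fields the diff's comments describe */
enum ops_state { OPSS_NONE = 0, OPSS_DISPATCHING = 1 };

struct task {
	atomic_long	ops_state;	/* OPSS_DISPATCHING while dispatch owns it */
	int		holding_cpu;	/* CPU driving the transfer, -1 if none */
};

/*
 * Dispatcher: mark the task as under transfer by this CPU, then drop
 * DISPATCHING with a release store so a dequeuer that observes OPSS_NONE
 * is also guaranteed to observe holding_cpu.
 */
static void start_transfer(struct task *t, int this_cpu)
{
	t->holding_cpu = this_cpu;
	atomic_store_explicit(&t->ops_state, OPSS_NONE, memory_order_release);
}

/*
 * Dispatcher, after dropping and reacquiring the source runqueue lock:
 * the transfer is still ours only if no concurrent dequeue cleared
 * holding_cpu in the meantime - the same check the restructured code keeps.
 */
static bool still_holding(const struct task *t, int this_cpu)
{
	return t->holding_cpu == this_cpu;
}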
