@@ -2402,65 +2402,61 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 	}
 
 #ifdef CONFIG_SMP
-	if (likely(task_can_run_on_remote_rq(p, dst_rq, true))) {
-		/*
-		 * @p is on a possibly remote @src_rq which we need to lock to
-		 * move the task. If dequeue is in progress, it'd be locking
-		 * @src_rq and waiting on DISPATCHING, so we can't grab @src_rq
-		 * lock while holding DISPATCHING.
-		 *
-		 * As DISPATCHING guarantees that @p is wholly ours, we can
-		 * pretend that we're moving from a DSQ and use the same
-		 * mechanism - mark the task under transfer with holding_cpu,
-		 * release DISPATCHING and then follow the same protocol. See
-		 * unlink_dsq_and_lock_src_rq().
-		 */
-		p->scx.holding_cpu = raw_smp_processor_id();
+	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+		dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+		return;
+	}
 
-		/* store_release ensures that dequeue sees the above */
-		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+	/*
+	 * @p is on a possibly remote @src_rq which we need to lock to move the
+	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
+	 * on DISPATCHING, so we can't grab @src_rq lock while holding
+	 * DISPATCHING.
+	 *
+	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
+	 * we're moving from a DSQ and use the same mechanism - mark the task
+	 * under transfer with holding_cpu, release DISPATCHING and then follow
+	 * the same protocol. See unlink_dsq_and_lock_src_rq().
+	 */
+	p->scx.holding_cpu = raw_smp_processor_id();
 
-		/* switch to @src_rq lock */
-		if (rq != src_rq) {
-			raw_spin_rq_unlock(rq);
-			raw_spin_rq_lock(src_rq);
-		}
+	/* store_release ensures that dequeue sees the above */
+	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
 
-		/* task_rq couldn't have changed if we're still the holding cpu */
-		if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
-		    !WARN_ON_ONCE(src_rq != task_rq(p))) {
-			/*
-			 * If @p is staying on the same rq, there's no need to
-			 * go through the full deactivate/activate cycle.
-			 * Optimize by abbreviating the operations in
-			 * move_task_to_local_dsq().
-			 */
-			if (src_rq == dst_rq) {
-				p->scx.holding_cpu = -1;
-				dispatch_enqueue(&dst_rq->scx.local_dsq,
-						 p, enq_flags);
-			} else {
-				move_task_to_local_dsq(p, enq_flags,
-						       src_rq, dst_rq);
-			}
+	/* switch to @src_rq lock */
+	if (rq != src_rq) {
+		raw_spin_rq_unlock(rq);
+		raw_spin_rq_lock(src_rq);
+	}
 
-			/* if the destination CPU is idle, wake it up */
-			if (sched_class_above(p->sched_class,
-					      dst_rq->curr->sched_class))
-				resched_curr(dst_rq);
+	/* task_rq couldn't have changed if we're still the holding cpu */
+	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
+	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
+		/*
+		 * If @p is staying on the same rq, there's no need to go
+		 * through the full deactivate/activate cycle. Optimize by
+		 * abbreviating the operations in move_task_to_local_dsq().
+		 */
+		if (src_rq == dst_rq) {
+			p->scx.holding_cpu = -1;
+			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
+		} else {
+			move_task_to_local_dsq(p, enq_flags, src_rq, dst_rq);
 		}
 
-		/* switch back to @rq lock */
-		if (rq != dst_rq) {
-			raw_spin_rq_unlock(dst_rq);
-			raw_spin_rq_lock(rq);
-		}
+		/* if the destination CPU is idle, wake it up */
+		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
+			resched_curr(dst_rq);
+	}
 
-		return;
+	/* switch back to @rq lock */
+	if (rq != dst_rq) {
+		raw_spin_rq_unlock(dst_rq);
+		raw_spin_rq_lock(rq);
 	}
+#else	/* CONFIG_SMP */
+	BUG();	/* control can not reach here on UP */
 #endif	/* CONFIG_SMP */
-
-	dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 /**
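Review note: the hunk above inverts the `task_can_run_on_remote_rq()` test into an early return that falls back to `scx_dsq_global`, which lets the whole SMP path drop one indentation level, and it replaces the old silent fall-through on UP with an explicit `BUG()`, since control cannot reach this point on uniprocessor. The handoff protocol the big comment describes is easy to lose in the diff noise, so here is a drastically simplified userspace model of it. Everything in it is hypothetical: C11 atomics stand in for `atomic_long_set_release()`, pthread mutexes stand in for the rq locks, and the spinning dequeuer stands in for the real dequeue path, which waits on DISPATCHING while holding @src_rq's lock.

/* toy_handoff.c - hypothetical userspace model, not kernel code */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { OPSS_NONE, OPSS_DISPATCHING };	/* stand-ins for SCX_OPSS_* */

struct task {
	_Atomic long ops_state;		/* models p->scx.ops_state */
	_Atomic int holding_cpu;	/* models p->scx.holding_cpu */
};

static pthread_mutex_t rq_lock[2] = {	/* one lock per toy runqueue */
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/*
 * Dispatcher: mark the task as held by this CPU, publish OPSS_NONE with
 * a store-release, then trade the local lock for the source lock.
 * Returns true iff the task is still ours after relocking.
 */
static bool claim_for_move(struct task *t, int this_cpu, int rq, int src_rq)
{
	atomic_store_explicit(&t->holding_cpu, this_cpu, memory_order_relaxed);

	/* pairs with the load-acquire in dequeuer(): whoever observes
	 * OPSS_NONE is guaranteed to also observe the holding_cpu store */
	atomic_store_explicit(&t->ops_state, OPSS_NONE, memory_order_release);

	if (rq != src_rq) {
		pthread_mutex_unlock(&rq_lock[rq]);
		pthread_mutex_lock(&rq_lock[src_rq]);
	}

	/* a dequeue that won the race cleared holding_cpu; back off */
	return atomic_load_explicit(&t->holding_cpu, memory_order_relaxed)
	       == this_cpu;
}

/* Dequeuer: wait for DISPATCHING to clear, then steal the task back. */
static void *dequeuer(void *arg)
{
	struct task *t = arg;

	while (atomic_load_explicit(&t->ops_state, memory_order_acquire)
	       != OPSS_NONE)
		;	/* the real path waits holding @src_rq's lock */
	atomic_store_explicit(&t->holding_cpu, -1, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	struct task t = { .ops_state = OPSS_DISPATCHING, .holding_cpu = -1 };
	pthread_t thr;

	pthread_create(&thr, NULL, dequeuer, &t);
	pthread_mutex_lock(&rq_lock[0]);
	if (claim_for_move(&t, /*this_cpu=*/0, /*rq=*/0, /*src_rq=*/1))
		puts("still the holding cpu: safe to move the task");
	else
		puts("lost the race: dequeue took the task back");
	pthread_mutex_unlock(&rq_lock[1]);
	pthread_join(thr, NULL);
	return 0;
}

The ordering exists for the reason the original comment gives: a dequeuer blocks on DISPATCHING while already holding @src_rq's lock, so the dispatcher can never take that lock first. Instead it publishes its claim through holding_cpu, releases DISPATCHING, takes the lock, and treats a changed holding_cpu as having lost the task.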