@@ -2369,6 +2369,73 @@ static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *r
/*
 * !CONFIG_SMP stub: with a single CPU there is never a remote rq to pull a
 * task from, so remote consumption trivially fails.
 */
static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
				       struct scx_dispatch_q *dsq, struct rq *task_rq)
{
	return false;
}
23702370#endif /* CONFIG_SMP */
23712371
/**
 * move_task_between_dsqs() - Move a task from one DSQ to another
 * @p: target task
 * @enq_flags: %SCX_ENQ_*
 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
 *
 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
 * will change. As @p's task_rq is locked, this function doesn't need to use the
 * holding_cpu mechanism.
 *
 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
 * return value, is locked.
 */
static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
					 struct scx_dispatch_q *src_dsq,
					 struct scx_dispatch_q *dst_dsq)
{
	struct rq *src_rq = task_rq(p), *dst_rq;

	/* local DSQs are moved via their owning rq, never as a @src_dsq */
	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
	lockdep_assert_held(&src_dsq->lock);
	lockdep_assert_rq_held(src_rq);

	if (dst_dsq->id == SCX_DSQ_LOCAL) {
		/* a local DSQ is embedded in its rq; recover the rq from it */
		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
			/*
			 * @p can't run on @dst_rq (e.g. affinity/migration
			 * restrictions); redirect to a global DSQ and stay on
			 * the current rq instead of migrating.
			 */
			dst_dsq = find_global_dsq(p);
			dst_rq = src_rq;
		}
	} else {
		/* no need to migrate if destination is a non-local DSQ */
		dst_rq = src_rq;
	}

	/*
	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
	 * CPU, @p will be migrated.
	 */
	if (dst_dsq->id == SCX_DSQ_LOCAL) {
		/* @p is going from a non-local DSQ to a local DSQ */
		if (src_rq == dst_rq) {
			/* same rq: unlink, then move while both locks held */
			task_unlink_from_dsq(p, src_dsq);
			move_local_task_to_local_dsq(p, enq_flags,
						     src_dsq, dst_rq);
			raw_spin_unlock(&src_dsq->lock);
		} else {
			/*
			 * Cross-CPU: drop $src_dsq->lock first — the move
			 * helper switches the rq locks and migrates @p.
			 */
			raw_spin_unlock(&src_dsq->lock);
			move_remote_task_to_local_dsq(p, enq_flags,
						      src_rq, dst_rq);
		}
	} else {
		/*
		 * @p is going from a non-local DSQ to a non-local DSQ. As
		 * $src_dsq is already locked, do an abbreviated dequeue.
		 */
		task_unlink_from_dsq(p, src_dsq);
		p->scx.dsq = NULL;
		raw_spin_unlock(&src_dsq->lock);

		/* full enqueue path onto the destination DSQ */
		dispatch_enqueue(dst_dsq, p, enq_flags);
	}

	/* caller now holds only @p's (possibly new) task_rq lock */
	return dst_rq;
}
2438+
23722439static bool consume_dispatch_q (struct rq * rq , struct scx_dispatch_q * dsq )
23732440{
23742441 struct task_struct * p ;
@@ -6033,7 +6100,7 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
60336100 u64 enq_flags )
60346101{
60356102 struct scx_dispatch_q * src_dsq = kit -> dsq , * dst_dsq ;
6036- struct rq * this_rq , * src_rq , * dst_rq , * locked_rq ;
6103+ struct rq * this_rq , * src_rq , * locked_rq ;
60376104 bool dispatched = false;
60386105 bool in_balance ;
60396106 unsigned long flags ;
@@ -6079,51 +6146,18 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
60796146 /* @p is still on $src_dsq and stable, determine the destination */
60806147 dst_dsq = find_dsq_for_dispatch (this_rq , dsq_id , p );
60816148
6082- if (dst_dsq -> id == SCX_DSQ_LOCAL ) {
6083- dst_rq = container_of (dst_dsq , struct rq , scx .local_dsq );
6084- if (!task_can_run_on_remote_rq (p , dst_rq , true)) {
6085- dst_dsq = find_global_dsq (p );
6086- dst_rq = src_rq ;
6087- }
6088- } else {
6089- /* no need to migrate if destination is a non-local DSQ */
6090- dst_rq = src_rq ;
6091- }
6092-
60936149 /*
6094- * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
6095- * CPU, @p will be migrated.
6150+ * Apply vtime and slice updates before moving so that the new time is
6151+ * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6152+ * this is safe as we're locking it.
60966153 */
6097- if (dst_dsq -> id == SCX_DSQ_LOCAL ) {
6098- /* @p is going from a non-local DSQ to a local DSQ */
6099- if (src_rq == dst_rq ) {
6100- task_unlink_from_dsq (p , src_dsq );
6101- move_local_task_to_local_dsq (p , enq_flags ,
6102- src_dsq , dst_rq );
6103- raw_spin_unlock (& src_dsq -> lock );
6104- } else {
6105- raw_spin_unlock (& src_dsq -> lock );
6106- move_remote_task_to_local_dsq (p , enq_flags ,
6107- src_rq , dst_rq );
6108- locked_rq = dst_rq ;
6109- }
6110- } else {
6111- /*
6112- * @p is going from a non-local DSQ to a non-local DSQ. As
6113- * $src_dsq is already locked, do an abbreviated dequeue.
6114- */
6115- task_unlink_from_dsq (p , src_dsq );
6116- p -> scx .dsq = NULL ;
6117- raw_spin_unlock (& src_dsq -> lock );
6118-
6119- if (kit -> cursor .flags & __SCX_DSQ_ITER_HAS_VTIME )
6120- p -> scx .dsq_vtime = kit -> vtime ;
6121- dispatch_enqueue (dst_dsq , p , enq_flags );
6122- }
6123-
6154+ if (kit -> cursor .flags & __SCX_DSQ_ITER_HAS_VTIME )
6155+ p -> scx .dsq_vtime = kit -> vtime ;
61246156 if (kit -> cursor .flags & __SCX_DSQ_ITER_HAS_SLICE )
61256157 p -> scx .slice = kit -> slice ;
61266158
6159+ /* execute move */
6160+ locked_rq = move_task_between_dsqs (p , enq_flags , src_dsq , dst_dsq );
61276161 dispatched = true;
61286162out :
61296163 if (in_balance ) {
0 commit comments