@@ -2172,32 +2172,39 @@ static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
         return false;
 }
 
-static void consume_local_task(struct task_struct *p,
-                               struct scx_dispatch_q *dsq, struct rq *rq)
+static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+                                         struct scx_dispatch_q *src_dsq,
+                                         struct rq *dst_rq)
 {
-        lockdep_assert_held(&dsq->lock);        /* released on return */
+        struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
+
+        /* @dsq is locked and @p is on @dst_rq */
+        lockdep_assert_held(&src_dsq->lock);
+        lockdep_assert_rq_held(dst_rq);
 
-        /* @dsq is locked and @p is on this rq */
         WARN_ON_ONCE(p->scx.holding_cpu >= 0);
-        task_unlink_from_dsq(p, dsq);
-        list_add_tail(&p->scx.dsq_list.node, &rq->scx.local_dsq.list);
-        dsq_mod_nr(&rq->scx.local_dsq, 1);
-        p->scx.dsq = &rq->scx.local_dsq;
-        raw_spin_unlock(&dsq->lock);
+
+        if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+                list_add(&p->scx.dsq_list.node, &dst_dsq->list);
+        else
+                list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
+
+        dsq_mod_nr(dst_dsq, 1);
+        p->scx.dsq = dst_dsq;
 }
 
 #ifdef CONFIG_SMP
 /**
- * move_task_to_local_dsq - Move a task from a different rq to a local DSQ
+ * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
  * @p: task to move
  * @enq_flags: %SCX_ENQ_*
  * @src_rq: rq to move the task from, locked on entry, released on return
  * @dst_rq: rq to move the task into, locked on return
  *
  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
  */
-static void move_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
-                                   struct rq *src_rq, struct rq *dst_rq)
+static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+                                          struct rq *src_rq, struct rq *dst_rq)
 {
         lockdep_assert_rq_held(src_rq);
 
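The hunk above changes the helper's contract: unlike consume_local_task(), move_local_task_to_local_dsq() neither unlinks @p from the source DSQ nor drops the DSQ lock, and it now honors %SCX_ENQ_HEAD / %SCX_ENQ_PREEMPT head placement. A minimal sketch of the resulting calling convention, for orientation only; the surrounding lock and unlock are illustrative and mirror the consume_dispatch_q() hunk further down, not code added by this commit:

        /* caller holds both the source DSQ lock and rq's rq lock */
        raw_spin_lock(&dsq->lock);                      /* illustrative: taken earlier by the real caller */
        task_unlink_from_dsq(p, dsq);                   /* unlinking is now the caller's job */
        move_local_task_to_local_dsq(p, 0, dsq, rq);    /* only splices @p onto rq's local DSQ */
        raw_spin_unlock(&dsq->lock);                    /* releasing the DSQ lock also moved to the caller */

The helper's job is now limited to placing @p on the destination local DSQ, at the head or tail depending on enq_flags.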
@@ -2320,7 +2327,7 @@ static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
         raw_spin_rq_unlock(this_rq);
 
         if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
-                move_task_to_local_dsq(p, 0, src_rq, this_rq);
+                move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
                 return true;
         } else {
                 raw_spin_rq_unlock(src_rq);
@@ -2351,7 +2358,9 @@ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
                 struct rq *task_rq = task_rq(p);
 
                 if (rq == task_rq) {
-                        consume_local_task(p, dsq, rq);
+                        task_unlink_from_dsq(p, dsq);
+                        move_local_task_to_local_dsq(p, 0, dsq, rq);
+                        raw_spin_unlock(&dsq->lock);
                         return true;
                 }
 
@@ -2431,13 +2440,14 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
                 /*
                  * If @p is staying on the same rq, there's no need to go
                  * through the full deactivate/activate cycle. Optimize by
-                 * abbreviating the operations in move_task_to_local_dsq().
+                 * abbreviating move_remote_task_to_local_dsq().
                  */
                 if (src_rq == dst_rq) {
                         p->scx.holding_cpu = -1;
                         dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
                 } else {
-                        move_task_to_local_dsq(p, enq_flags, src_rq, dst_rq);
+                        move_remote_task_to_local_dsq(p, enq_flags,
+                                                      src_rq, dst_rq);
                 }
 
                 /* if the destination CPU is idle, wake it up */