@@ -2695,6 +2695,16 @@ static void unbind_worker(struct worker *worker)
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
 
+
+static void detach_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	unbind_worker(worker);
+	list_del(&worker->node);
+	worker->pool = NULL;
+}
+
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
@@ -2711,11 +2721,7 @@ static void worker_detach_from_pool(struct worker *worker)
 	WARN_ON_ONCE(pool->flags & POOL_BH);
 
 	mutex_lock(&wq_pool_attach_mutex);
-
-	unbind_worker(worker);
-	list_del(&worker->node);
-	worker->pool = NULL;
-
+	detach_worker(worker);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
@@ -2807,24 +2813,12 @@ static struct worker *create_worker(struct worker_pool *pool)
 	return NULL;
 }
 
-static void wake_dying_workers(struct list_head *cull_list)
+static void detach_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker;
 
-	list_for_each_entry(worker, cull_list, entry) {
-		unbind_worker(worker);
-		/*
-		 * If the worker was somehow already running, then it had to be
-		 * in pool->idle_list when set_worker_dying() happened or we
-		 * wouldn't have gotten here.
-		 *
-		 * Thus, the worker must either have observed the WORKER_DIE
-		 * flag, or have set its state to TASK_IDLE. Either way, the
-		 * below will be observed by the worker and is safe to do
-		 * outside of pool->lock.
-		 */
-		wake_up_process(worker->task);
-	}
+	list_for_each_entry(worker, cull_list, entry)
+		detach_worker(worker);
 }
 
 static void reap_dying_workers(struct list_head *cull_list)
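Note on this hunk: the explicit wake_up_process() is dropped because dying workers are now woken and joined when they are reaped rather than when they are detached. For context, reap_dying_workers(), which this hunk only references, does that via kthread_stop(); the sketch below is reconstructed from the surrounding workqueue.c source and is not part of this diff, so treat the exact body as an approximation.

static void reap_dying_workers(struct list_head *cull_list)
{
	struct worker *worker, *tmp;

	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
		list_del_init(&worker->entry);
		/* kthread_stop() wakes the dying worker and waits for it to exit */
		kthread_stop_put(worker->task);
		kfree(worker);
	}
}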
@@ -2930,9 +2924,9 @@ static void idle_cull_fn(struct work_struct *work)
 
 	/*
 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
-	 * cannot proceed beyong worker_detach_from_pool() in its self-destruct
-	 * path. This is required as a previously-preempted worker could run after
-	 * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyond set_pf_worker() in its self-destruct path.
+	 * This is required as a previously-preempted worker could run after
+	 * set_worker_dying() has happened but before detach_dying_workers() did.
 	 */
 	mutex_lock(&wq_pool_attach_mutex);
 	raw_spin_lock_irq(&pool->lock);
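The rewritten comment holds because set_pf_worker(), which the dying worker calls on its way out, itself serializes on wq_pool_attach_mutex. The sketch below shows set_pf_worker() as it already exists elsewhere in workqueue.c; it is reproduced from the surrounding source for context, not taken from this diff, so the exact body may differ slightly.

static void set_pf_worker(bool val)
{
	/*
	 * wq_pool_attach_mutex is what makes the ordering argument above hold:
	 * a preempted dying worker blocks here until idle_cull_fn() is done.
	 */
	mutex_lock(&wq_pool_attach_mutex);
	if (val)
		current->flags |= PF_WQ_WORKER;
	else
		current->flags &= ~PF_WQ_WORKER;
	mutex_unlock(&wq_pool_attach_mutex);
}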
@@ -2953,7 +2947,7 @@ static void idle_cull_fn(struct work_struct *work)
 	}
 
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	reap_dying_workers(&cull_list);
@@ -3336,7 +3330,6 @@ static int worker_thread(void *__worker)
 
 		set_task_comm(worker->task, "kworker/dying");
 		ida_free(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
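With worker_detach_from_pool() gone from the self-destruct path, the WORKER_DIE branch of worker_thread() ends up looking roughly like the sketch below. It is assembled from this hunk's context lines plus the surrounding kernel source (the unlock and set_pf_worker() lines are assumptions, not part of this diff): the dying worker only clears PF_WQ_WORKER, renames itself and frees its ID, while detaching from the pool is now done by the code that culled it.

	if (unlikely(worker->flags & WORKER_DIE)) {
		raw_spin_unlock_irq(&pool->lock);
		set_pf_worker(false);	/* blocks on wq_pool_attach_mutex */

		set_task_comm(worker->task, "kworker/dying");
		ida_free(&pool->worker_ida, worker->id);
		WARN_ON_ONCE(!list_empty(&worker->entry));
		return 0;
	}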
@@ -4921,7 +4914,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	raw_spin_unlock_irq(&pool->lock);
 
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 
 	mutex_unlock(&wq_pool_attach_mutex);
 