@@ -248,7 +248,7 @@ struct workqueue_struct {
 	struct list_head	flusher_overflow; /* WQ: flush overflow list */
 
 	struct list_head	maydays;	/* MD: pwqs requesting rescue */
-	struct worker		*rescuer;	/* I: rescue worker */
+	struct worker		*rescuer;	/* MD: rescue worker */
 
 	int			nr_drainers;	/* WQ: drain in progress */
 	int			saved_max_active; /* WQ: saved pwq max_active */
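The one-letter prefixes in these field comments are workqueue.c's locking annotations. A recap of the relevant entries, paraphrased from the legend at the top of the file (wording approximate):

/*
 * I:  Modifiable by initialization/destruction paths and read-only for
 *     everyone else.
 *
 * WQ: wq->mutex protected.
 *
 * MD: wq_mayday_lock protected.
 */

Retagging rescuer from "I:" to "MD:" records that the field is now written under wq_mayday_lock at destruction time, as the destroy_workqueue() hunks below show.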
@@ -355,6 +355,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+static void show_pwq(struct pool_workqueue *pwq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -425,7 +426,8 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * ignored.
  */
 #define for_each_pwq(pwq, wq)						\
-	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
+	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
+				 lockdep_is_held(&wq->mutex))		\
 		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
 		else
 
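On newer kernels (v5.4 and later), list_for_each_entry_rcu() takes an optional fourth "cond" argument that is handed to RCU lockdep; passing lockdep_is_held(&wq->mutex) stops lockdep from complaining when the pwq list is walked under wq->mutex instead of rcu_read_lock(). A minimal sketch of the same pattern, using a hypothetical ctx structure:

struct ctx {
	struct mutex lock;
	struct list_head items;		/* RCU + ctx->lock protected */
};

struct item {
	struct list_head node;
	int val;
};

/* Called with c->lock held; the cond argument tells RCU lockdep that
 * holding the mutex is as valid as rcu_read_lock() for this walk. */
static int sum_items_locked(struct ctx *c)
{
	struct item *it;
	int sum = 0;

	lockdep_assert_held(&c->lock);
	list_for_each_entry_rcu(it, &c->items, node,
				lockdep_is_held(&c->lock))
		sum += it->val;
	return sum;
}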
@@ -2532,8 +2534,14 @@ static int rescuer_thread(void *__rescuer)
 		 */
 		if (need_to_create_worker(pool)) {
 			spin_lock(&wq_mayday_lock);
-			get_pwq(pwq);
-			list_move_tail(&pwq->mayday_node, &wq->maydays);
+			/*
+			 * Queue iff we aren't racing destruction
+			 * and somebody else hasn't queued it already.
+			 */
+			if (wq->rescuer && list_empty(&pwq->mayday_node)) {
+				get_pwq(pwq);
+				list_add_tail(&pwq->mayday_node, &wq->maydays);
+			}
 			spin_unlock(&wq_mayday_lock);
 		}
 	}
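This guard mirrors the test send_mayday() already performs before putting a pwq on wq->maydays. For comparison, send_mayday() in the same file looks roughly like this (paraphrased from memory of the v5.4-era source; treat the details as approximate):

static void send_mayday(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq_mayday_lock);

	if (!wq->rescuer)
		return;

	/* mayday mayday mayday */
	if (list_empty(&pwq->mayday_node)) {
		get_pwq(pwq);	/* pin the pwq until the rescuer is done */
		list_add_tail(&pwq->mayday_node, &wq->maydays);
		wake_up_process(wq->rescuer->task);
	}
}

With both sides checking wq->rescuer and list_empty(&pwq->mayday_node) under wq_mayday_lock, the rescuer can no longer re-queue a pwq while destroy_workqueue() is tearing the rescuer down, which is what used to leak a pwq reference and trip the destruction-time sanity checks. list_move_tail() also becomes list_add_tail(): once list_empty() holds, the node is known to be off-list.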
@@ -4314,6 +4322,22 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 }
 EXPORT_SYMBOL_GPL(alloc_workqueue);
 
+static bool pwq_busy(struct pool_workqueue *pwq)
+{
+	int i;
+
+	for (i = 0; i < WORK_NR_COLORS; i++)
+		if (pwq->nr_in_flight[i])
+			return true;
+
+	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
+		return true;
+	if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+		return true;
+
+	return false;
+}
+
 /**
  * destroy_workqueue - safely terminate a workqueue
  * @wq: target workqueue
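pwq_busy() folds the three inline WARN_ON() checks of the old destroy_workqueue() into a single predicate. Everything it reads (nr_in_flight[], refcnt, nr_active, delayed_works) changes under the pwq's pool->lock, so callers are expected to hold that lock. A usage sketch with a hypothetical wrapper name:

/* Hypothetical helper, not part of the patch: sample pwq_busy() safely. */
static bool pwq_still_busy(struct pool_workqueue *pwq)
{
	bool busy;

	spin_lock_irq(&pwq->pool->lock);
	busy = pwq_busy(pwq);
	spin_unlock_irq(&pwq->pool->lock);

	return busy;
}

destroy_workqueue() below additionally holds wq_pool_mutex and wq->mutex, so concurrent put_pwq() callers have drained before refcnt is inspected.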
@@ -4325,31 +4349,51 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	struct pool_workqueue *pwq;
 	int node;
 
+	/*
+	 * Remove it from sysfs first so that sanity check failure doesn't
+	 * lead to sysfs name conflicts.
+	 */
+	workqueue_sysfs_unregister(wq);
+
 	/* drain it before proceeding with destruction */
 	drain_workqueue(wq);
 
-	/* sanity checks */
-	mutex_lock(&wq->mutex);
-	for_each_pwq(pwq, wq) {
-		int i;
+	/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
+	if (wq->rescuer) {
+		struct worker *rescuer = wq->rescuer;
 
-		for (i = 0; i < WORK_NR_COLORS; i++) {
-			if (WARN_ON(pwq->nr_in_flight[i])) {
-				mutex_unlock(&wq->mutex);
-				show_workqueue_state();
-				return;
-			}
-		}
+		/* this prevents new queueing */
+		spin_lock_irq(&wq_mayday_lock);
+		wq->rescuer = NULL;
+		spin_unlock_irq(&wq_mayday_lock);
+
+		/* rescuer will empty maydays list before exiting */
+		kthread_stop(rescuer->task);
+		kfree(rescuer);
+	}
 
-		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
-		    WARN_ON(pwq->nr_active) ||
-		    WARN_ON(!list_empty(&pwq->delayed_works))) {
+	/*
+	 * Sanity checks - grab all the locks so that we wait for all
+	 * in-flight operations which may do put_pwq().
+	 */
+	mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq->mutex);
+	for_each_pwq(pwq, wq) {
+		spin_lock_irq(&pwq->pool->lock);
+		if (WARN_ON(pwq_busy(pwq))) {
+			pr_warning("%s: %s has the following busy pwq\n",
+				   __func__, wq->name);
+			show_pwq(pwq);
+			spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
+			mutex_unlock(&wq_pool_mutex);
 			show_workqueue_state();
 			return;
 		}
+		spin_unlock_irq(&pwq->pool->lock);
 	}
 	mutex_unlock(&wq->mutex);
+	mutex_unlock(&wq_pool_mutex);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
@@ -4359,11 +4403,6 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del_rcu(&wq->list);
 	mutex_unlock(&wq_pool_mutex);
 
-	workqueue_sysfs_unregister(wq);
-
-	if (wq->rescuer)
-		kthread_stop(wq->rescuer->task);
-
 	if (!(wq->flags & WQ_UNBOUND)) {
 		wq_unregister_lockdep(wq);
 		/*
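Taken together, the two destroy_workqueue() hunks reorder the whole teardown. The net sequence, summarized as a C comment (my paraphrase of the diff above):

/*
 * destroy_workqueue() after this patch:
 *
 *  1. workqueue_sysfs_unregister(wq)  - release the sysfs name up front so
 *     a failed sanity check cannot cause a name conflict for a later wq.
 *  2. drain_workqueue(wq)             - flush all queued work.
 *  3. Clear wq->rescuer under wq_mayday_lock, then kthread_stop() and
 *     kfree() the rescuer; it empties wq->maydays before exiting.
 *  4. Check every pwq with pwq_busy() while holding wq_pool_mutex,
 *     wq->mutex and pool->lock, fencing out in-flight put_pwq() callers.
 *  5. list_del_rcu(&wq->list) and release the pwqs, as before.
 */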
@@ -4638,7 +4677,8 @@ static void show_pwq(struct pool_workqueue *pwq)
 	pr_info("  pwq %d:", pool->id);
 	pr_cont_pool_info(pool);
 
-	pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
+	pr_cont(" active=%d/%d refcnt=%d%s\n",
+		pwq->nr_active, pwq->max_active, pwq->refcnt,
 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
 
 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
@@ -4657,7 +4697,7 @@ static void show_pwq(struct pool_workqueue *pwq)
 
 			pr_cont("%s %d%s:%ps", comma ? "," : "",
 				task_pid_nr(worker->task),
-				worker == pwq->wq->rescuer ? "(RESCUER)" : "",
+				worker->rescue_wq ? "(RESCUER)" : "",
 				worker->current_func);
 			list_for_each_entry(work, &worker->scheduled, entry)
 				pr_cont_work(false, work);
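show_pwq() can no longer compare a worker against wq->rescuer, since destroy_workqueue() now clears that pointer before the rescuer exits, and show_pwq() may run exactly then, from the new sanity-check path. worker->rescue_wq, set when the rescuer is created and, as far as I can tell, never reset, identifies the rescuer reliably during teardown. As a standalone predicate (hypothetical wrapper, not in the patch):

/* True iff @worker is some workqueue's rescuer; valid even while
 * destroy_workqueue() is stopping it, unlike a wq->rescuer compare. */
static bool worker_is_rescuer(struct worker *worker)
{
	return worker->rescue_wq != NULL;
}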