@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 			raw_spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
 			mutex_unlock(&wq_pool_mutex);
-			show_workqueue_state();
+			show_one_workqueue(wq);
 			return;
 		}
 		raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
  */
-void show_workqueue_state(void)
+void show_one_workqueue(struct workqueue_struct *wq)
 {
-	struct workqueue_struct *wq;
-	struct worker_pool *pool;
+	struct pool_workqueue *pwq;
+	bool idle = true;
 	unsigned long flags;
-	int pi;
-
-	rcu_read_lock();
-
-	pr_info("Showing busy workqueues and worker pools:\n");
-
-	list_for_each_entry_rcu(wq, &workqueues, list) {
-		struct pool_workqueue *pwq;
-		bool idle = true;
 
-		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				idle = false;
-				break;
-			}
+	for_each_pwq(pwq, wq) {
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+			idle = false;
+			break;
 		}
-		if (idle)
-			continue;
+	}
+	if (idle) /* Nothing to print for idle workqueue */
+		return;
 
-		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
-		for_each_pwq(pwq, wq) {
-			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				/*
-				 * Defer printing to avoid deadlocks in console
-				 * drivers that queue work while holding locks
-				 * also taken in their write paths.
-				 */
-				printk_deferred_enter();
-				show_pwq(pwq);
-				printk_deferred_exit();
-			}
-			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+	for_each_pwq(pwq, wq) {
+		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
 			/*
-			 * We could be printing a lot from atomic context, e.g.
-			 * sysrq-t -> show_workqueue_state(). Avoid triggering
-			 * hard lockup.
+			 * Defer printing to avoid deadlocks in console
+			 * drivers that queue work while holding locks
+			 * also taken in their write paths.
 			 */
-			touch_nmi_watchdog();
-		}
-	}
-
-	for_each_pool(pool, pi) {
-		struct worker *worker;
-		bool first = true;
-
-		raw_spin_lock_irqsave(&pool->lock, flags);
-		if (pool->nr_workers == pool->nr_idle)
-			goto next_pool;
-		/*
-		 * Defer printing to avoid deadlocks in console drivers that
-		 * queue work while holding locks also taken in their write
-		 * paths.
-		 */
-		printk_deferred_enter();
-		pr_info("pool %d:", pool->id);
-		pr_cont_pool_info(pool);
-		pr_cont(" hung=%us workers=%d",
-			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-			pool->nr_workers);
-		if (pool->manager)
-			pr_cont(" manager: %d",
-				task_pid_nr(pool->manager->task));
-		list_for_each_entry(worker, &pool->idle_list, entry) {
-			pr_cont(" %s%d", first ? "idle: " : "",
-				task_pid_nr(worker->task));
-			first = false;
+			printk_deferred_enter();
+			show_pwq(pwq);
+			printk_deferred_exit();
 		}
-		pr_cont("\n");
-		printk_deferred_exit();
-	next_pool:
-		raw_spin_unlock_irqrestore(&pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 		/*
 		 * We could be printing a lot from atomic context, e.g.
-		 * sysrq-t -> show_workqueue_state(). Avoid triggering
+		 * sysrq-t -> show_all_workqueues(). Avoid triggering
 		 * hard lockup.
 		 */
 		touch_nmi_watchdog();
 	}
 
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+	struct worker *worker;
+	bool first = true;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pool->lock, flags);
+	if (pool->nr_workers == pool->nr_idle)
+		goto next_pool;
+	/*
+	 * Defer printing to avoid deadlocks in console drivers that
+	 * queue work while holding locks also taken in their write
+	 * paths.
+	 */
+	printk_deferred_enter();
+	pr_info("pool %d:", pool->id);
+	pr_cont_pool_info(pool);
+	pr_cont(" hung=%us workers=%d",
+		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+		pool->nr_workers);
+	if (pool->manager)
+		pr_cont(" manager: %d",
+			task_pid_nr(pool->manager->task));
+	list_for_each_entry(worker, &pool->idle_list, entry) {
+		pr_cont(" %s%d", first ? "idle: " : "",
+			task_pid_nr(worker->task));
+		first = false;
+	}
+	pr_cont("\n");
+	printk_deferred_exit();
+next_pool:
+	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	/*
+	 * We could be printing a lot from atomic context, e.g.
+	 * sysrq-t -> show_all_workqueues(). Avoid triggering
+	 * hard lockup.
+	 */
+	touch_nmi_watchdog();
+
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	struct worker_pool *pool;
+	int pi;
+
+	rcu_read_lock();
+
+	pr_info("Showing busy workqueues and worker pools:\n");
+
+	list_for_each_entry_rcu(wq, &workqueues, list)
+		show_one_workqueue(wq);
+
+	for_each_pool(pool, pi)
+		show_one_worker_pool(pool);
+
 	rcu_read_unlock();
 }
 
@@ -5384,16 +5404,22 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	int ret = -EINVAL;
 	cpumask_var_t saved_cpumask;
 
-	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
-		return -ENOMEM;
-
 	/*
 	 * Not excluding isolated cpus on purpose.
 	 * If the user wishes to include them, we allow that.
 	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
+		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+			ret = 0;
+			goto out_unlock;
+		}
+
+		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
 
 		/* save the old wq_unbound_cpumask. */
 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
@@ -5406,10 +5432,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 		if (ret < 0)
 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
+		free_cpumask_var(saved_cpumask);
+out_unlock:
 		apply_wqattrs_unlock();
 	}
 
-	free_cpumask_var(saved_cpumask);
 	return ret;
 }
 
@@ -5869,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	rcu_read_unlock();
 
 	if (lockup_detected)
-		show_workqueue_state();
+		show_all_workqueues();
 
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
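A minimal usage sketch, not part of the diff above: with the split-out helpers, built-in code that already knows which workqueue it suspects can dump just that queue, the way destroy_workqueue() now does, while the sysrq handler and the wq watchdog keep the system-wide dump via show_all_workqueues(). The workqueue pointer my_wq and the helper my_stall_report() below are hypothetical names introduced only for illustration, and the sketch assumes the declarations this change exposes in <linux/workqueue.h>.

#include <linux/workqueue.h>

/* Hypothetical driver-owned workqueue, created elsewhere with alloc_workqueue(). */
static struct workqueue_struct *my_wq;

static void my_stall_report(void)
{
	/* Prints nothing when my_wq has no active or inactive work items. */
	show_one_workqueue(my_wq);

	/* Full dump of all busy workqueues and worker pools. */
	show_all_workqueues();
}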