
Commit 55df093

imran-kn authored and htejun committed
workqueue: Introduce show_one_worker_pool and show_one_workqueue.
Currently, show_workqueue_state() shows the state of all workqueues and of all worker pools. In certain cases we may need to dump the state of only a specific workqueue or worker pool. For example, in destroy_workqueue() we only need to show the state of the workqueue that is getting destroyed.

So rename show_workqueue_state() to show_all_workqueues() (to signify that it dumps the state of all busy workqueues) and divide it into more granular functions (show_one_workqueue() and show_one_worker_pool()) that show the state of individual workqueues and worker pools and can be used in cases such as the one mentioned above.

Also, as mentioned earlier, make destroy_workqueue() dump data pertaining only to the workqueue that is being destroyed, and make users of the earlier interface (show_workqueue_state()) use the new interface (show_all_workqueues()).

Signed-off-by: Imran Khan <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
1 parent d25302e commit 55df093
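As an illustration of the new, more granular interface, here is a minimal sketch of a built-in caller that dumps only its own workqueue when it suspects a stall. The example_wq pointer and the example_report_stall() hook are hypothetical; only show_one_workqueue(), declared in include/linux/workqueue.h by this patch, comes from the kernel.

#include <linux/printk.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;     /* hypothetical queue */

/*
 * Hypothetical stall handler: with this patch a caller can dump the
 * state of just the suspect workqueue, rather than calling the
 * system-wide show_all_workqueues().
 */
static void example_report_stall(void)
{
        pr_warn("example_wq appears stuck, dumping its state\n");
        show_one_workqueue(example_wq);
}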

4 files changed: +100 -79 lines changed

drivers/tty/sysrq.c

Lines changed: 1 addition & 1 deletion

@@ -296,7 +296,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
 static void sysrq_handle_showstate(int key)
 {
         show_state();
-        show_workqueue_state();
+        show_all_workqueues();
 }
 
 static const struct sysrq_key_op sysrq_showstate_op = {
         .handler = sysrq_handle_showstate,

include/linux/workqueue.h

Lines changed: 2 additions & 1 deletion

@@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**

kernel/power/process.c

Lines changed: 1 addition & 1 deletion

@@ -94,7 +94,7 @@ static int try_to_freeze_tasks(bool user_only)
                        todo - wq_busy, wq_busy);
 
         if (wq_busy)
-                show_workqueue_state();
+                show_all_workqueues();
 
         if (!wakeup || pm_debug_messages_on) {
                 read_lock(&tasklist_lock);

kernel/workqueue.c

Lines changed: 96 additions & 76 deletions

@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>

@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                         raw_spin_unlock_irq(&pwq->pool->lock);
                         mutex_unlock(&wq->mutex);
                         mutex_unlock(&wq_pool_mutex);
-                        show_workqueue_state();
+                        show_one_workqueue(wq);
                         return;
                 }
                 raw_spin_unlock_irq(&pwq->pool->lock);

@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
  */
-void show_workqueue_state(void)
+void show_one_workqueue(struct workqueue_struct *wq)
 {
-        struct workqueue_struct *wq;
-        struct worker_pool *pool;
+        struct pool_workqueue *pwq;
+        bool idle = true;
         unsigned long flags;
-        int pi;
-
-        rcu_read_lock();
 
-        pr_info("Showing busy workqueues and worker pools:\n");
-
-        list_for_each_entry_rcu(wq, &workqueues, list) {
-                struct pool_workqueue *pwq;
-                bool idle = true;
-
-                for_each_pwq(pwq, wq) {
-                        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                                idle = false;
-                                break;
-                        }
+        for_each_pwq(pwq, wq) {
+                if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                        idle = false;
+                        break;
                 }
-                if (idle)
-                        continue;
+        }
+        if (idle) /* Nothing to print for idle workqueue */
+                return;
 
-                pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+        pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
-                for_each_pwq(pwq, wq) {
-                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                                /*
-                                 * Defer printing to avoid deadlocks in console
-                                 * drivers that queue work while holding locks
-                                 * also taken in their write paths.
-                                 */
-                                printk_deferred_enter();
-                                show_pwq(pwq);
-                                printk_deferred_exit();
-                        }
-                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+        for_each_pwq(pwq, wq) {
+                raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+                if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
                         /*
-                         * We could be printing a lot from atomic context, e.g.
-                         * sysrq-t -> show_workqueue_state(). Avoid triggering
-                         * hard lockup.
+                         * Defer printing to avoid deadlocks in console
+                         * drivers that queue work while holding locks
+                         * also taken in their write paths.
                          */
-                        touch_nmi_watchdog();
-                }
-        }
-
-        for_each_pool(pool, pi) {
-                struct worker *worker;
-                bool first = true;
-
-                raw_spin_lock_irqsave(&pool->lock, flags);
-                if (pool->nr_workers == pool->nr_idle)
-                        goto next_pool;
-                /*
-                 * Defer printing to avoid deadlocks in console drivers that
-                 * queue work while holding locks also taken in their write
-                 * paths.
-                 */
-                printk_deferred_enter();
-                pr_info("pool %d:", pool->id);
-                pr_cont_pool_info(pool);
-                pr_cont(" hung=%us workers=%d",
-                        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-                        pool->nr_workers);
-                if (pool->manager)
-                        pr_cont(" manager: %d",
-                                task_pid_nr(pool->manager->task));
-                list_for_each_entry(worker, &pool->idle_list, entry) {
-                        pr_cont(" %s%d", first ? "idle: " : "",
-                                task_pid_nr(worker->task));
-                        first = false;
+                        printk_deferred_enter();
+                        show_pwq(pwq);
+                        printk_deferred_exit();
                 }
-                pr_cont("\n");
-                printk_deferred_exit();
-        next_pool:
-                raw_spin_unlock_irqrestore(&pool->lock, flags);
+                raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                 /*
                  * We could be printing a lot from atomic context, e.g.
-                 * sysrq-t -> show_workqueue_state(). Avoid triggering
+                 * sysrq-t -> show_all_workqueues(). Avoid triggering
                  * hard lockup.
                  */
                 touch_nmi_watchdog();
         }
 
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+        struct worker *worker;
+        bool first = true;
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&pool->lock, flags);
+        if (pool->nr_workers == pool->nr_idle)
+                goto next_pool;
+        /*
+         * Defer printing to avoid deadlocks in console drivers that
+         * queue work while holding locks also taken in their write
+         * paths.
+         */
+        printk_deferred_enter();
+        pr_info("pool %d:", pool->id);
+        pr_cont_pool_info(pool);
+        pr_cont(" hung=%us workers=%d",
+                jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+                pool->nr_workers);
+        if (pool->manager)
+                pr_cont(" manager: %d",
+                        task_pid_nr(pool->manager->task));
+        list_for_each_entry(worker, &pool->idle_list, entry) {
+                pr_cont(" %s%d", first ? "idle: " : "",
+                        task_pid_nr(worker->task));
+                first = false;
+        }
+        pr_cont("\n");
+        printk_deferred_exit();
+next_pool:
+        raw_spin_unlock_irqrestore(&pool->lock, flags);
+        /*
+         * We could be printing a lot from atomic context, e.g.
+         * sysrq-t -> show_all_workqueues(). Avoid triggering
+         * hard lockup.
+         */
+        touch_nmi_watchdog();
+
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+        struct workqueue_struct *wq;
+        struct worker_pool *pool;
+        int pi;
+
+        rcu_read_lock();
+
+        pr_info("Showing busy workqueues and worker pools:\n");
+
+        list_for_each_entry_rcu(wq, &workqueues, list)
+                show_one_workqueue(wq);
+
+        for_each_pool(pool, pi)
+                show_one_worker_pool(pool);
+
         rcu_read_unlock();
 }
 

@@ -5876,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
         rcu_read_unlock();
 
         if (lockup_detected)
-                show_workqueue_state();
+                show_all_workqueues();
 
         wq_watchdog_reset_touched();
         mod_timer(&wq_watchdog_timer, jiffies + thresh);
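In sum, after this change the dump paths decompose as the diff above shows: show_all_workqueues() holds rcu_read_lock() while iterating all workqueues and pools and delegates to show_one_workqueue() and show_one_worker_pool(), each of which takes the relevant pool->lock itself. That is what lets destroy_workqueue() call show_one_workqueue(wq) directly, without any of the system-wide iteration.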
