
Commit 4075409

Merge branch 'for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Nothing too interesting. An optimization to short-circuit noop cpumask
  updates, debug dump code reorg, and doc update"

* 'for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: doc: Call out the non-reentrance conditions
  workqueue: Introduce show_one_worker_pool and show_one_workqueue.
  workqueue: make sysfs of unbound kworker cpumask more clever
2 parents bba7d68 + f9eaaa8 commit 4075409

5 files changed: +128 -87 lines changed

Documentation/core-api/workqueue.rst
Lines changed: 17 additions & 4 deletions

@@ -216,10 +216,6 @@ resources, scheduled and executed.
 
   This flag is meaningless for unbound wq.
 
-Note that the flag ``WQ_NON_REENTRANT`` no longer exists as all
-workqueues are now non-reentrant - any work item is guaranteed to be
-executed by at most one worker system-wide at any given time.
-
 
 ``max_active``
 --------------

@@ -391,6 +387,23 @@ the stack trace of the offending worker thread. ::
   The work item's function should be trivially visible in the stack
   trace.
 
+Non-reentrance Conditions
+=========================
+
+Workqueue guarantees that a work item cannot be re-entrant if the following
+conditions hold after a work item gets queued:
+
+1. The work function hasn't been changed.
+2. No one queues the work item to another workqueue.
+3. The work item hasn't been reinitiated.
+
+In other words, if the above conditions hold, the work item is guaranteed to be
+executed by at most one worker system-wide at any given time.
+
+Note that requeuing the work item (to the same queue) in the self function
+doesn't break these conditions, so it's safe to do. Otherwise, caution is
+required when breaking the conditions inside a work function.
+
 
 Kernel Inline Documentations Reference
 ======================================
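
As a quick illustration of the conditions called out above (this sketch is not part of the patch; my_wq, my_work and more_work_pending() are made-up names), a work item that requeues itself onto the same workqueue from its own work function keeps all three conditions intact, so the non-reentrance guarantee keeps holding:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;  /* hypothetical workqueue */

static bool more_work_pending(void)     /* hypothetical "is there more to do?" check */
{
    return false;
}

static void my_work_fn(struct work_struct *work);
static DECLARE_WORK(my_work, my_work_fn);

static void my_work_fn(struct work_struct *work)
{
    /* ... process one batch ... */

    /*
     * Requeuing to the *same* workqueue from the work function itself
     * does not break conditions 1-3, so at most one worker system-wide
     * runs my_work_fn() for this item at any given time.
     */
    if (more_work_pending())
        queue_work(my_wq, &my_work);

    /*
     * Queueing &my_work on a different workqueue here, or re-running
     * INIT_WORK() on it, would break the conditions and void the
     * non-reentrance guarantee.
     */
}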

drivers/tty/sysrq.c
Lines changed: 1 addition & 1 deletion

@@ -296,7 +296,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
 static void sysrq_handle_showstate(int key)
 {
     show_state();
-    show_workqueue_state();
+    show_all_workqueues();
 }
 static const struct sysrq_key_op sysrq_showstate_op = {
     .handler    = sysrq_handle_showstate,

include/linux/workqueue.h
Lines changed: 2 additions & 1 deletion

@@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**
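
A minimal sketch of how the split API might be used by built-in code, assuming a hypothetical helper my_debug_dump() (not something this commit adds): show_one_workqueue() dumps only the workqueue of interest, while show_all_workqueues() keeps the old show_workqueue_state() behaviour of dumping every busy workqueue and pool. In the series itself, destroy_workqueue() switches to show_one_workqueue() and the sysrq, freezer and watchdog paths move to show_all_workqueues().

#include <linux/workqueue.h>

/* Hypothetical built-in debug helper; neither symbol is exported to modules here. */
static void my_debug_dump(struct workqueue_struct *suspect_wq, bool everything)
{
    if (everything)
        show_all_workqueues();          /* all busy workqueues and worker pools */
    else
        show_one_workqueue(suspect_wq); /* only this workqueue's busy pwqs */
}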

kernel/power/process.c
Lines changed: 1 addition & 1 deletion

@@ -94,7 +94,7 @@ static int try_to_freeze_tasks(bool user_only)
            todo - wq_busy, wq_busy);
 
         if (wq_busy)
-            show_workqueue_state();
+            show_all_workqueues();
 
         if (!wakeup || pm_debug_messages_on) {
             read_lock(&tasklist_lock);

kernel/workqueue.c
Lines changed: 107 additions & 80 deletions

@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>

@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
             raw_spin_unlock_irq(&pwq->pool->lock);
             mutex_unlock(&wq->mutex);
             mutex_unlock(&wq_pool_mutex);
-            show_workqueue_state();
+            show_one_workqueue(wq);
             return;
         }
         raw_spin_unlock_irq(&pwq->pool->lock);

@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
  */
-void show_workqueue_state(void)
+void show_one_workqueue(struct workqueue_struct *wq)
 {
-    struct workqueue_struct *wq;
-    struct worker_pool *pool;
+    struct pool_workqueue *pwq;
+    bool idle = true;
     unsigned long flags;
-    int pi;
-
-    rcu_read_lock();
-
-    pr_info("Showing busy workqueues and worker pools:\n");
-
-    list_for_each_entry_rcu(wq, &workqueues, list) {
-        struct pool_workqueue *pwq;
-        bool idle = true;
 
-        for_each_pwq(pwq, wq) {
-            if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                idle = false;
-                break;
-            }
+    for_each_pwq(pwq, wq) {
+        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+            idle = false;
+            break;
         }
-        if (idle)
-            continue;
+    }
+    if (idle) /* Nothing to print for idle workqueue */
+        return;
 
-        pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+    pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
-        for_each_pwq(pwq, wq) {
-            raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-            if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                /*
-                 * Defer printing to avoid deadlocks in console
-                 * drivers that queue work while holding locks
-                 * also taken in their write paths.
-                 */
-                printk_deferred_enter();
-                show_pwq(pwq);
-                printk_deferred_exit();
-            }
-            raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+    for_each_pwq(pwq, wq) {
+        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
             /*
-             * We could be printing a lot from atomic context, e.g.
-             * sysrq-t -> show_workqueue_state(). Avoid triggering
-             * hard lockup.
+             * Defer printing to avoid deadlocks in console
+             * drivers that queue work while holding locks
+             * also taken in their write paths.
              */
-            touch_nmi_watchdog();
-        }
-    }
-
-    for_each_pool(pool, pi) {
-        struct worker *worker;
-        bool first = true;
-
-        raw_spin_lock_irqsave(&pool->lock, flags);
-        if (pool->nr_workers == pool->nr_idle)
-            goto next_pool;
-        /*
-         * Defer printing to avoid deadlocks in console drivers that
-         * queue work while holding locks also taken in their write
-         * paths.
-         */
-        printk_deferred_enter();
-        pr_info("pool %d:", pool->id);
-        pr_cont_pool_info(pool);
-        pr_cont(" hung=%us workers=%d",
-            jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-            pool->nr_workers);
-        if (pool->manager)
-            pr_cont(" manager: %d",
-                task_pid_nr(pool->manager->task));
-        list_for_each_entry(worker, &pool->idle_list, entry) {
-            pr_cont(" %s%d", first ? "idle: " : "",
-                task_pid_nr(worker->task));
-            first = false;
+            printk_deferred_enter();
+            show_pwq(pwq);
+            printk_deferred_exit();
         }
-        pr_cont("\n");
-        printk_deferred_exit();
-    next_pool:
-        raw_spin_unlock_irqrestore(&pool->lock, flags);
+        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
         /*
         * We could be printing a lot from atomic context, e.g.
-        * sysrq-t -> show_workqueue_state(). Avoid triggering
+        * sysrq-t -> show_all_workqueues(). Avoid triggering
         * hard lockup.
         */
        touch_nmi_watchdog();
    }
 
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+    struct worker *worker;
+    bool first = true;
+    unsigned long flags;
+
+    raw_spin_lock_irqsave(&pool->lock, flags);
+    if (pool->nr_workers == pool->nr_idle)
+        goto next_pool;
+    /*
+     * Defer printing to avoid deadlocks in console drivers that
+     * queue work while holding locks also taken in their write
+     * paths.
+     */
+    printk_deferred_enter();
+    pr_info("pool %d:", pool->id);
+    pr_cont_pool_info(pool);
+    pr_cont(" hung=%us workers=%d",
+        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+        pool->nr_workers);
+    if (pool->manager)
+        pr_cont(" manager: %d",
+            task_pid_nr(pool->manager->task));
+    list_for_each_entry(worker, &pool->idle_list, entry) {
+        pr_cont(" %s%d", first ? "idle: " : "",
+            task_pid_nr(worker->task));
+        first = false;
+    }
+    pr_cont("\n");
+    printk_deferred_exit();
+next_pool:
+    raw_spin_unlock_irqrestore(&pool->lock, flags);
+    /*
+     * We could be printing a lot from atomic context, e.g.
+     * sysrq-t -> show_all_workqueues(). Avoid triggering
+     * hard lockup.
+     */
+    touch_nmi_watchdog();
+
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+    struct workqueue_struct *wq;
+    struct worker_pool *pool;
+    int pi;
+
+    rcu_read_lock();
+
+    pr_info("Showing busy workqueues and worker pools:\n");
+
+    list_for_each_entry_rcu(wq, &workqueues, list)
+        show_one_workqueue(wq);
+
+    for_each_pool(pool, pi)
+        show_one_worker_pool(pool);
+
+
     rcu_read_unlock();
 }
 

@@ -5384,16 +5404,22 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
     int ret = -EINVAL;
     cpumask_var_t saved_cpumask;
 
-    if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
-        return -ENOMEM;
-
     /*
     * Not excluding isolated cpus on purpose.
     * If the user wishes to include them, we allow that.
     */
    cpumask_and(cpumask, cpumask, cpu_possible_mask);
    if (!cpumask_empty(cpumask)) {
        apply_wqattrs_lock();
+        if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+            ret = 0;
+            goto out_unlock;
+        }
+
+        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+            ret = -ENOMEM;
+            goto out_unlock;
+        }
 
         /* save the old wq_unbound_cpumask. */
         cpumask_copy(saved_cpumask, wq_unbound_cpumask);

@@ -5406,10 +5432,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
         if (ret < 0)
             cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
+        free_cpumask_var(saved_cpumask);
+out_unlock:
         apply_wqattrs_unlock();
     }
 
-    free_cpumask_var(saved_cpumask);
     return ret;
 }
 

@@ -5869,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
     rcu_read_unlock();
 
     if (lockup_detected)
-        show_workqueue_state();
+        show_all_workqueues();
 
     wq_watchdog_reset_touched();
     mod_timer(&wq_watchdog_timer, jiffies + thresh);
