Commit 85a77db
Merge tag 'wq-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Nothing major:

   - workqueue.panic_on_stall boot param added

   - alloc_workqueue_lockdep_map() added (used by DRM)

   - Other cleanups and doc updates"

* tag 'wq-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  kernel/workqueue.c: fix DEFINE_PER_CPU_SHARED_ALIGNED expansion
  workqueue: Fix another htmldocs build warning
  workqueue: fix null-ptr-deref on __alloc_workqueue() error
  workqueue: Don't call va_start / va_end twice
  workqueue: Fix htmldocs build warning
  workqueue: Add interface for user-defined workqueue lockdep map
  workqueue: Change workqueue lockdep map to pointer
  workqueue: Split alloc_workqueue into internal function and lockdep init
  Documentation: kernel-parameters: add workqueue.panic_on_stall
  workqueue: add cmdline parameter workqueue.panic_on_stall
2 parents 78567e2 + b4722b8 commit 85a77db

File tree: 3 files changed, +130 -25 lines

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 7 additions & 0 deletions

@@ -7386,6 +7386,13 @@
 			it can be updated at runtime by writing to the
 			corresponding sysfs file.
 
+	workqueue.panic_on_stall=<uint>
+			Panic when a workqueue stall is detected by
+			CONFIG_WQ_WATCHDOG. It sets the number of stall
+			detections needed to trigger the panic.
+
+			The default is 0, which disables the panic on stall.
+
 	workqueue.cpu_intensive_thresh_us=
 			Per-cpu work items which run for longer than this
 			threshold are automatically considered CPU intensive
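
For example (hypothetical values; the counting semantics come from the
panic_on_wq_watchdog() hunk in kernel/workqueue.c below): booting with
workqueue.panic_on_stall=3 makes the kernel panic on the third stall the
watchdog detects. Because the parameter is registered with mode 0644, root
can also adjust it at runtime via the usual module-parameter path:

	workqueue.panic_on_stall=3                                  # kernel command line
	echo 3 > /sys/module/workqueue/parameters/panic_on_stall    # at runtime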

include/linux/workqueue.h

Lines changed: 41 additions & 0 deletions

@@ -507,6 +507,47 @@ void workqueue_softirq_dead(unsigned int cpu);
 __printf(1, 4) struct workqueue_struct *
 alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
 
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
+ * workqueues created with the same purpose and to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+			    struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
+ * Useful for workqueues created with the same purpose and to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
+	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
+				    1, lockdep_map, ##args)
+#endif
+
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
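
A minimal usage sketch (the driver names below are hypothetical, and
STATIC_LOCKDEP_MAP_INIT is just one way a caller could set up the shared
map): a single static lockdep_map serves every workqueue created for the
same purpose, so repeated create/destroy cycles no longer register a fresh
lockdep class each time.

	#include <linux/workqueue.h>
	#include <linux/lockdep.h>

	#ifdef CONFIG_LOCKDEP
	/* One map shared by all "mydrv-submit" queues (hypothetical names). */
	static struct lock_class_key mydrv_wq_key;
	static struct lockdep_map mydrv_wq_lockdep_map =
		STATIC_LOCKDEP_MAP_INIT("mydrv_wq_lockdep_map", &mydrv_wq_key);
	#endif

	static struct workqueue_struct *mydrv_create_submit_wq(int id)
	{
	#ifdef CONFIG_LOCKDEP
		return alloc_ordered_workqueue_lockdep_map("mydrv-submit-%d",
							   WQ_MEM_RECLAIM,
							   &mydrv_wq_lockdep_map, id);
	#else
		return alloc_ordered_workqueue("mydrv-submit-%d", WQ_MEM_RECLAIM, id);
	#endif
	}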

kernel/workqueue.c

Lines changed: 82 additions & 25 deletions
@@ -364,7 +364,8 @@ struct workqueue_struct {
 #ifdef CONFIG_LOCKDEP
 	char			*lock_name;
 	struct lock_class_key	key;
-	struct lockdep_map	lockdep_map;
+	struct lockdep_map	__lockdep_map;
+	struct lockdep_map	*lockdep_map;
 #endif
 	char			name[WQ_NAME_LEN]; /* I: workqueue name */

@@ -476,16 +477,13 @@ static bool wq_debug_force_rr_cpu = false;
 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
 /* to raise softirq for the BH worker pools on other CPUs */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
-				     bh_pool_irq_works);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], bh_pool_irq_works);
 
 /* the BH worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     bh_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools);
 
 /* the per-cpu worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     cpu_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 
 static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

@@ -3203,7 +3201,7 @@ __acquires(&pool->lock)
 	lockdep_start_depth = lockdep_depth(current);
 	/* see drain_dead_softirq_workfn() */
 	if (!bh_draining)
-		lock_map_acquire(&pwq->wq->lockdep_map);
+		lock_map_acquire(pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
 	 * Strictly speaking we should mark the invariant state without holding

@@ -3237,7 +3235,7 @@ __acquires(&pool->lock)
 	pwq->stats[PWQ_STAT_COMPLETED]++;
 	lock_map_release(&lockdep_map);
 	if (!bh_draining)
-		lock_map_release(&pwq->wq->lockdep_map);
+		lock_map_release(pwq->wq->lockdep_map);
 
 	if (unlikely((worker->task && in_atomic()) ||
 		     lockdep_depth(current) != lockdep_start_depth ||

@@ -3873,11 +3871,14 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 static void touch_wq_lockdep_map(struct workqueue_struct *wq)
 {
 #ifdef CONFIG_LOCKDEP
+	if (unlikely(!wq->lockdep_map))
+		return;
+
 	if (wq->flags & WQ_BH)
 		local_bh_disable();
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
+	lock_map_acquire(wq->lockdep_map);
+	lock_map_release(wq->lockdep_map);
 
 	if (wq->flags & WQ_BH)
 		local_bh_enable();

@@ -3911,7 +3912,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
 		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, (*wq->lockdep_map)),
 	};
 	int next_color;

@@ -4776,16 +4777,23 @@ static void wq_init_lockdep(struct workqueue_struct *wq)
 		lock_name = wq->name;
 
 	wq->lock_name = lock_name;
-	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+	wq->lockdep_map = &wq->__lockdep_map;
+	lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0);
 }
 
 static void wq_unregister_lockdep(struct workqueue_struct *wq)
 {
+	if (wq->lockdep_map != &wq->__lockdep_map)
+		return;
+
 	lockdep_unregister_key(&wq->key);
 }
 
 static void wq_free_lockdep(struct workqueue_struct *wq)
 {
+	if (wq->lockdep_map != &wq->__lockdep_map)
+		return;
+
 	if (wq->lock_name != wq->name)
 		kfree(wq->lock_name);
 }
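
The guards added above encode a simple ownership rule: teardown only touches
lockdep state while wq->lockdep_map still points at the embedded __lockdep_map,
so a user-supplied map is never unregistered or freed by workqueue code. The
same pattern in miniature, as standalone C with hypothetical names:

	#include <stdio.h>

	struct obj {
		int  __own_res;	/* embedded default resource */
		int *res;	/* points at __own_res or a caller-owned one */
	};

	static void obj_init(struct obj *o, int *external)
	{
		o->res = external ? external : &o->__own_res;
	}

	static void obj_teardown(struct obj *o)
	{
		/* Mirrors wq_free_lockdep(): only release what we own. */
		if (o->res != &o->__own_res)
			return;
		printf("releasing embedded resource\n");
	}

	int main(void)
	{
		struct obj a, b;
		int shared;

		obj_init(&a, NULL);	/* uses its embedded resource */
		obj_init(&b, &shared);	/* borrows the caller's resource */
		obj_teardown(&a);	/* releases */
		obj_teardown(&b);	/* no-op */
		return 0;
	}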
@@ -5619,12 +5627,10 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 	} while (activated);
 }
 
-__printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-					 unsigned int flags,
-					 int max_active, ...)
+static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+						  unsigned int flags,
+						  int max_active, va_list args)
 {
-	va_list args;
 	struct workqueue_struct *wq;
 	size_t wq_size;
 	int name_len;

@@ -5656,9 +5662,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 		goto err_free_wq;
 	}
 
-	va_start(args, max_active);
 	name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
-	va_end(args);
 
 	if (name_len >= WQ_NAME_LEN)
 		pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",

@@ -5688,12 +5692,11 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	INIT_LIST_HEAD(&wq->flusher_overflow);
 	INIT_LIST_HEAD(&wq->maydays);
 
-	wq_init_lockdep(wq);
 	INIT_LIST_HEAD(&wq->list);
 
 	if (flags & WQ_UNBOUND) {
 		if (alloc_node_nr_active(wq->node_nr_active) < 0)
-			goto err_unreg_lockdep;
+			goto err_free_wq;
 	}
 
 	/*

@@ -5732,9 +5735,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 		kthread_flush_worker(pwq_release_worker);
 		free_node_nr_active(wq->node_nr_active);
 	}
-err_unreg_lockdep:
-	wq_unregister_lockdep(wq);
-	wq_free_lockdep(wq);
 err_free_wq:
 	free_workqueue_attrs(wq->unbound_attrs);
 	kfree(wq);

@@ -5745,8 +5745,49 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	destroy_workqueue(wq);
 	return NULL;
 }
+
+__printf(1, 4)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args;
+
+	va_start(args, max_active);
+	wq = __alloc_workqueue(fmt, flags, max_active, args);
+	va_end(args);
+	if (!wq)
+		return NULL;
+
+	wq_init_lockdep(wq);
+
+	return wq;
+}
 EXPORT_SYMBOL_GPL(alloc_workqueue);
 
+#ifdef CONFIG_LOCKDEP
+__printf(1, 5)
+struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags,
+			    int max_active, struct lockdep_map *lockdep_map, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args;
+
+	va_start(args, lockdep_map);
+	wq = __alloc_workqueue(fmt, flags, max_active, args);
+	va_end(args);
+	if (!wq)
+		return NULL;
+
+	wq->lockdep_map = lockdep_map;
+
+	return wq;
+}
+EXPORT_SYMBOL_GPL(alloc_workqueue_lockdep_map);
+#endif
+
 static bool pwq_busy(struct pool_workqueue *pwq)
 {
 	int i;
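
The split also resolves the duplicated va_start/va_end noted in the merge
log: __alloc_workqueue() only consumes a va_list it does not own, and each
public variadic wrapper brackets exactly one va_start/va_end pair around the
call. A standalone illustration of the convention (hypothetical names):

	#include <stdarg.h>
	#include <stdio.h>

	/* Internal worker: takes a va_list, never calls va_start/va_end. */
	static int log_v(const char *fmt, va_list args)
	{
		return vprintf(fmt, args);
	}

	/* Public wrapper: exactly one va_start/va_end pair per call. */
	static int log_fmt(const char *fmt, ...)
	{
		va_list args;
		int ret;

		va_start(args, fmt);
		ret = log_v(fmt, args);
		va_end(args);

		return ret;
	}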
@@ -7406,6 +7447,9 @@ static struct timer_list wq_watchdog_timer;
 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 
+static unsigned int wq_panic_on_stall;
+module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
+
 /*
  * Show workers that might prevent the processing of pending work items.
  * The only candidates are CPU-bound workers in the running state.

@@ -7457,6 +7501,16 @@ static void show_cpu_pools_hogs(void)
 	rcu_read_unlock();
 }
 
+static void panic_on_wq_watchdog(void)
+{
+	static unsigned int wq_stall;
+
+	if (wq_panic_on_stall) {
+		wq_stall++;
+		BUG_ON(wq_stall >= wq_panic_on_stall);
+	}
+}
+
 static void wq_watchdog_reset_touched(void)
 {
 	int cpu;

@@ -7529,6 +7583,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	if (cpu_pool_stall)
 		show_cpu_pools_hogs();
 
+	if (lockup_detected)
+		panic_on_wq_watchdog();
+
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
