@@ -364,7 +364,8 @@ struct workqueue_struct {
 #ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
-	struct lockdep_map	lockdep_map;
+	struct lockdep_map	__lockdep_map;
+	struct lockdep_map	*lockdep_map;
 #endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */
 
@@ -476,16 +477,13 @@ static bool wq_debug_force_rr_cpu = false;
 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
 /* to raise softirq for the BH worker pools on other CPUs */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
-				     bh_pool_irq_works);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], bh_pool_irq_works);
 
 /* the BH worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     bh_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools);
 
 /* the per-cpu worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     cpu_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 
 static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
 
@@ -3203,7 +3201,7 @@ __acquires(&pool->lock)
	lockdep_start_depth = lockdep_depth(current);
	/* see drain_dead_softirq_workfn() */
	if (!bh_draining)
-		lock_map_acquire(&pwq->wq->lockdep_map);
+		lock_map_acquire(pwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	/*
	 * Strictly speaking we should mark the invariant state without holding
@@ -3237,7 +3235,7 @@ __acquires(&pool->lock)
	pwq->stats[PWQ_STAT_COMPLETED]++;
	lock_map_release(&lockdep_map);
	if (!bh_draining)
-		lock_map_release(&pwq->wq->lockdep_map);
+		lock_map_release(pwq->wq->lockdep_map);
 
	if (unlikely((worker->task && in_atomic()) ||
		     lockdep_depth(current) != lockdep_start_depth ||
@@ -3873,11 +3871,14 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 static void touch_wq_lockdep_map(struct workqueue_struct *wq)
 {
 #ifdef CONFIG_LOCKDEP
+	if (unlikely(!wq->lockdep_map))
+		return;
+
	if (wq->flags & WQ_BH)
		local_bh_disable();
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
+	lock_map_acquire(wq->lockdep_map);
+	lock_map_release(wq->lockdep_map);
 
	if (wq->flags & WQ_BH)
		local_bh_enable();
@@ -3911,7 +3912,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, (*wq->lockdep_map)),
	};
	int next_color;
 
@@ -4776,16 +4777,23 @@ static void wq_init_lockdep(struct workqueue_struct *wq)
		lock_name = wq->name;
 
	wq->lock_name = lock_name;
-	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+	wq->lockdep_map = &wq->__lockdep_map;
+	lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0);
 }
 
 static void wq_unregister_lockdep(struct workqueue_struct *wq)
 {
+	if (wq->lockdep_map != &wq->__lockdep_map)
+		return;
+
	lockdep_unregister_key(&wq->key);
 }
 
 static void wq_free_lockdep(struct workqueue_struct *wq)
 {
+	if (wq->lockdep_map != &wq->__lockdep_map)
+		return;
+
	if (wq->lock_name != wq->name)
		kfree(wq->lock_name);
 }
@@ -5619,12 +5627,10 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
	} while (activated);
 }
 
-__printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-					 unsigned int flags,
-					 int max_active, ...)
+static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+						  unsigned int flags,
+						  int max_active, va_list args)
 {
-	va_list args;
	struct workqueue_struct *wq;
	size_t wq_size;
	int name_len;
@@ -5656,9 +5662,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
			goto err_free_wq;
	}
 
-	va_start(args, max_active);
	name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
-	va_end(args);
 
	if (name_len >= WQ_NAME_LEN)
		pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
@@ -5688,12 +5692,11 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
	INIT_LIST_HEAD(&wq->flusher_overflow);
	INIT_LIST_HEAD(&wq->maydays);
 
-	wq_init_lockdep(wq);
	INIT_LIST_HEAD(&wq->list);
 
	if (flags & WQ_UNBOUND) {
		if (alloc_node_nr_active(wq->node_nr_active) < 0)
-			goto err_unreg_lockdep;
+			goto err_free_wq;
	}
 
	/*
@@ -5732,9 +5735,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
		kthread_flush_worker(pwq_release_worker);
		free_node_nr_active(wq->node_nr_active);
	}
-err_unreg_lockdep:
-	wq_unregister_lockdep(wq);
-	wq_free_lockdep(wq);
 err_free_wq:
	free_workqueue_attrs(wq->unbound_attrs);
	kfree(wq);
@@ -5745,8 +5745,49 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
	destroy_workqueue(wq);
	return NULL;
 }
+
+__printf(1, 4)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args;
+
+	va_start(args, max_active);
+	wq = __alloc_workqueue(fmt, flags, max_active, args);
+	va_end(args);
+	if (!wq)
+		return NULL;
+
+	wq_init_lockdep(wq);
+
+	return wq;
+}
 EXPORT_SYMBOL_GPL(alloc_workqueue);
 
+#ifdef CONFIG_LOCKDEP
+__printf(1, 5)
+struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags,
+			    int max_active, struct lockdep_map *lockdep_map, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args;
+
+	va_start(args, lockdep_map);
+	wq = __alloc_workqueue(fmt, flags, max_active, args);
+	va_end(args);
+	if (!wq)
+		return NULL;
+
+	wq->lockdep_map = lockdep_map;
+
+	return wq;
+}
+EXPORT_SYMBOL_GPL(alloc_workqueue_lockdep_map);
+#endif
+
 static bool pwq_busy(struct pool_workqueue *pwq)
 {
	int i;
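The new alloc_workqueue_lockdep_map() interface lets a caller hand the workqueue a lockdep map kept in static storage, so the lockdep class survives the workqueue being destroyed and reallocated. A minimal usage sketch follows; it is not part of this patch, assumes CONFIG_LOCKDEP, and the names my_wq, my_wq_key and my_driver_init are illustrative only:

/* statically allocated map and key that outlive any workqueue using them */
static struct lock_class_key my_wq_key;
static struct lockdep_map my_wq_lockdep_map =
	STATIC_LOCKDEP_MAP_INIT("(wq_completion)my_wq", &my_wq_key);

static struct workqueue_struct *my_wq;

static int my_driver_init(void)
{
	/* the caller-supplied map is used instead of the embedded __lockdep_map */
	my_wq = alloc_workqueue_lockdep_map("my_wq", WQ_UNBOUND, 0,
					    &my_wq_lockdep_map);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}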
@@ -7406,6 +7447,9 @@ static struct timer_list wq_watchdog_timer;
 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 
+static unsigned int wq_panic_on_stall;
+module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
+
 /*
  * Show workers that might prevent the processing of pending work items.
  * The only candidates are CPU-bound workers in the running state.
@@ -7457,6 +7501,16 @@ static void show_cpu_pools_hogs(void)
	rcu_read_unlock();
 }
 
+static void panic_on_wq_watchdog(void)
+{
+	static unsigned int wq_stall;
+
+	if (wq_panic_on_stall) {
+		wq_stall++;
+		BUG_ON(wq_stall >= wq_panic_on_stall);
+	}
+}
+
 static void wq_watchdog_reset_touched(void)
 {
	int cpu;
@@ -7529,6 +7583,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
	if (cpu_pool_stall)
		show_cpu_pools_hogs();
 
+	if (lockup_detected)
+		panic_on_wq_watchdog();
+
	wq_watchdog_reset_touched();
	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
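With wq_panic_on_stall left at 0 the new knob is inert; setting it to N makes the watchdog hit the BUG_ON() once N stalls have been detected in total (the static wq_stall counter is never reset, so the count is cumulative rather than consecutive). Since the parameter is declared with module_param_named() in built-in code, it would presumably be set as workqueue.panic_on_stall=N on the kernel command line or via /sys/module/workqueue/parameters/panic_on_stall at run time.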