@@ -433,7 +433,7 @@ static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *wq_update_pod_attrs_buf;
+static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
@@ -5341,13 +5341,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 }
 
 /**
- * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
+ * unbound_wq_update_pwq - update a pwq slot for CPU hot[un]plug
  * @wq: the target workqueue
- * @cpu: the CPU to update pool association for
+ * @cpu: the CPU to update the pwq slot for
  *
  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
- * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
- * @wq accordingly.
+ * %CPU_DOWN_FAILED. @cpu is in the same pod of the CPU being hot[un]plugged.
  *
  *
  * If pod affinity can't be adjusted due to memory allocation failure, it falls
@@ -5360,7 +5359,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
  * responsibility to flush the work item from CPU_DOWN_PREPARE.
  */
-static void wq_update_pod(struct workqueue_struct *wq, int cpu)
+static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
 {
 	struct pool_workqueue *old_pwq = NULL, *pwq;
 	struct workqueue_attrs *target_attrs;
@@ -5375,7 +5374,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu)
 	 * Let's use a preallocated one. The following buf is protected by
 	 * CPU hotplug exclusion.
 	 */
-	target_attrs = wq_update_pod_attrs_buf;
+	target_attrs = unbound_wq_update_pwq_attrs_buf;
 
 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
 	wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
@@ -6581,7 +6580,7 @@ int workqueue_online_cpu(unsigned int cpu)
 		int tcpu;
 
 		for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-			wq_update_pod(wq, tcpu);
+			unbound_wq_update_pwq(wq, tcpu);
 
 		mutex_lock(&wq->mutex);
 		wq_update_node_max_active(wq, -1);
@@ -6616,7 +6615,7 @@ int workqueue_offline_cpu(unsigned int cpu)
 		int tcpu;
 
 		for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-			wq_update_pod(wq, tcpu);
+			unbound_wq_update_pwq(wq, tcpu);
 
 		mutex_lock(&wq->mutex);
 		wq_update_node_max_active(wq, cpu);
@@ -6904,9 +6903,8 @@ static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
 	wq_affn_dfl = affn;
 
 	list_for_each_entry(wq, &workqueues, list) {
-		for_each_online_cpu(cpu) {
-			wq_update_pod(wq, cpu);
-		}
+		for_each_online_cpu(cpu)
+			unbound_wq_update_pwq(wq, cpu);
 	}
 
 	mutex_unlock(&wq_pool_mutex);
@@ -7653,8 +7651,8 @@ void __init workqueue_init_early(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	wq_update_pod_attrs_buf = alloc_workqueue_attrs();
-	BUG_ON(!wq_update_pod_attrs_buf);
+	unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
+	BUG_ON(!unbound_wq_update_pwq_attrs_buf);
 
 	/*
 	 * If nohz_full is enabled, set power efficient workqueue as unbound.
@@ -7919,12 +7917,12 @@ void __init workqueue_init_topology(void)
 
 	/*
 	 * Workqueues allocated earlier would have all CPUs sharing the default
-	 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
-	 * combinations to apply per-pod sharing.
+	 * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue
+	 * and CPU combinations to apply per-pod sharing.
 	 */
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_online_cpu(cpu)
-			wq_update_pod(wq, cpu);
+			unbound_wq_update_pwq(wq, cpu);
 		if (wq->flags & WQ_UNBOUND) {
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq, -1);
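
To make the call pattern in the hotplug hunks above easier to follow: when a CPU comes up or goes down, the callbacks walk every CPU that shares a pod with it (pt->pod_cpus[pt->cpu_pod[cpu]]) and hand each one to the renamed unbound_wq_update_pwq(), which refreshes that CPU's pwq slot and falls back to the default pwq when a suitable pool can't be set up. Below is a minimal userspace model of that loop, not kernel code: the arrays stand in for wq_pod_type's cpu_pod[]/pod_cpus[] mapping and a plain int table stands in for the per-CPU struct pool_workqueue slots.

/*
 * Minimal userspace model of the per-pod update loop used in the hotplug
 * paths above. Everything here is a stand-in: the arrays approximate
 * wq_pod_type's cpu_pod[]/pod_cpus[] mapping and pwq_slot[] is a plain
 * int table, not struct pool_workqueue.
 */
#include <stdio.h>

#define NR_CPUS	8
#define NR_PODS	2

/* cpu -> pod index, modelled after pt->cpu_pod[] */
static const int cpu_pod[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* pod -> member CPUs, -1 terminated, modelled after pt->pod_cpus[] */
static const int pod_cpus[NR_PODS][NR_CPUS + 1] = {
	{ 0, 1, 2, 3, -1 },
	{ 4, 5, 6, 7, -1 },
};

/* which CPUs are currently online */
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };

/* per-CPU "pwq slot": the pod whose pool serves this CPU, or -1 for dfl */
static int pwq_slot[NR_CPUS];

/* stand-in for unbound_wq_update_pwq(): refresh one CPU's slot */
static void unbound_wq_update_pwq(int cpu)
{
	int pod = cpu_pod[cpu];
	int has_online = 0;

	for (const int *t = pod_cpus[pod]; *t >= 0; t++)
		if (cpu_online[*t])
			has_online = 1;

	/* fall back to the default slot when the whole pod is offline */
	pwq_slot[cpu] = has_online ? pod : -1;
}

/* stand-in for workqueue_offline_cpu(): update every CPU in @cpu's pod */
static void cpu_going_down(int cpu)
{
	cpu_online[cpu] = 0;
	for (const int *t = pod_cpus[cpu_pod[cpu]]; *t >= 0; t++)
		unbound_wq_update_pwq(*t);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		unbound_wq_update_pwq(cpu);

	cpu_going_down(4);
	cpu_going_down(5);
	cpu_going_down(6);
	cpu_going_down(7);	/* pod 1 now empty: CPUs 4-7 fall back to -1 */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d -> slot %d\n", cpu, pwq_slot[cpu]);
	return 0;
}

The real code does this under CPU hotplug exclusion and preallocates the attrs buffer (unbound_wq_update_pwq_attrs_buf) in workqueue_init_early() so the hotplug path never has to allocate; the model above skips both concerns.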