Commit b2b1f93

workqueue: Rename wq_update_pod() to unbound_wq_update_pwq()

Authored by Lai Jiangshan, committed by Tejun Heo

What wq_update_pod() does is just update the pwq of a specific CPU. Rename it and update the comments.

Signed-off-by: Lai Jiangshan <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>

1 parent d160a58
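
To make the rename's scope concrete: an unbound workqueue keeps one pool_workqueue ("pwq") slot per possible CPU, and the renamed helper refreshes exactly one such slot. The following is a minimal userspace sketch of that idea, not kernel code: the struct layouts, the pod table, and main() are invented for illustration, and only the helper's name mirrors the patch.

#include <stdio.h>

#define NR_CPUS 4

struct pool_workqueue { int pool_id; };

struct workqueue_struct {
        struct pool_workqueue *cpu_pwq[NR_CPUS];  /* one pwq slot per CPU */
        struct pool_workqueue dfl_pwq;            /* fallback pwq */
};

/* assumed topology: CPUs 0-1 share pod 0, CPUs 2-3 share pod 1 */
static const int cpu_pod[NR_CPUS] = { 0, 0, 1, 1 };
static struct pool_workqueue pod_pwq[2] = { { 0 }, { 1 } };

/* toy counterpart of the renamed helper: refresh exactly one CPU's slot */
static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
{
        wq->cpu_pwq[cpu] = &pod_pwq[cpu_pod[cpu]];
}

int main(void)
{
        struct workqueue_struct wq = { .dfl_pwq = { -1 } };

        /* before per-pod setup, every slot points at the default pwq */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                wq.cpu_pwq[cpu] = &wq.dfl_pwq;

        /* apply per-pod sharing one slot at a time, as the patch's callers do */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                unbound_wq_update_pwq(&wq, cpu);
                printf("cpu%d -> pool %d\n", cpu, wq.cpu_pwq[cpu]->pool_id);
        }
        return 0;
}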

1 file changed: kernel/workqueue.c, 15 additions and 17 deletions

--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -433,7 +433,7 @@ static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *wq_update_pod_attrs_buf;
+static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);        /* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
@@ -5341,13 +5341,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 }
 
 /**
- * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
+ * unbound_wq_update_pwq - update a pwq slot for CPU hot[un]plug
  * @wq: the target workqueue
- * @cpu: the CPU to update pool association for
+ * @cpu: the CPU to update the pwq slot for
  *
  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
- * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
- * @wq accordingly.
+ * %CPU_DOWN_FAILED. @cpu is in the same pod of the CPU being hot[un]plugged.
  *
  *
 * If pod affinity can't be adjusted due to memory allocation failure, it falls
@@ -5360,7 +5359,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
  * responsibility to flush the work item from CPU_DOWN_PREPARE.
  */
-static void wq_update_pod(struct workqueue_struct *wq, int cpu)
+static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
 {
         struct pool_workqueue *old_pwq = NULL, *pwq;
         struct workqueue_attrs *target_attrs;
@@ -5375,7 +5374,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu)
          * Let's use a preallocated one. The following buf is protected by
          * CPU hotplug exclusion.
          */
-        target_attrs = wq_update_pod_attrs_buf;
+        target_attrs = unbound_wq_update_pwq_attrs_buf;
 
         copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
         wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
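
The assignment above is the reason the renamed buffer exists: the update runs in CPU-hotplug paths, so rather than allocating attrs on each call, a single buffer is allocated once at init and reused under hotplug exclusion. A standalone sketch of that preallocation pattern, with invented types; only the idea is taken from the patch:

#include <stdio.h>
#include <stdlib.h>

struct attrs { int nice; };

/* toy version of unbound_wq_update_pwq_attrs_buf: allocated once at init,
 * then reused by every update call instead of allocating per call */
static struct attrs *scratch_attrs;

static void init_early(void)
{
        scratch_attrs = malloc(sizeof(*scratch_attrs));
        if (!scratch_attrs)
                abort();  /* the kernel's init path uses BUG_ON() here */
}

/* update path: borrows the scratch buffer instead of allocating (in the
 * kernel, CPU-hotplug exclusion keeps this reuse safe) */
static void update_one(int cpu)
{
        struct attrs *target = scratch_attrs;

        target->nice = 0;
        printf("cpu%d updated via preallocated buffer\n", cpu);
}

int main(void)
{
        init_early();
        for (int cpu = 0; cpu < 4; cpu++)
                update_one(cpu);
        free(scratch_attrs);
        return 0;
}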
@@ -6581,7 +6580,7 @@ int workqueue_online_cpu(unsigned int cpu)
                         int tcpu;
 
                         for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-                                wq_update_pod(wq, tcpu);
+                                unbound_wq_update_pwq(wq, tcpu);
 
                         mutex_lock(&wq->mutex);
                         wq_update_node_max_active(wq, -1);
@@ -6616,7 +6615,7 @@ int workqueue_offline_cpu(unsigned int cpu)
                         int tcpu;
 
                         for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-                                wq_update_pod(wq, tcpu);
+                                unbound_wq_update_pwq(wq, tcpu);
 
                         mutex_lock(&wq->mutex);
                         wq_update_node_max_active(wq, cpu);
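
Both hotplug hunks share the same shape: the event CPU's whole pod is walked and each member's pwq slot refreshed individually, which is why the new kernel-doc says @cpu may merely share a pod with the CPU being hot[un]plugged. A self-contained sketch of that loop shape, assuming a made-up two-pod topology and a stand-in update_pwq_slot() helper:

#include <stdio.h>

#define NR_CPUS 4

/* assumed topology: CPUs 0-1 in pod 0, CPUs 2-3 in pod 1 */
static const int cpu_pod[NR_CPUS] = { 0, 0, 1, 1 };

/* made-up stand-in for unbound_wq_update_pwq(wq, tcpu) */
static void update_pwq_slot(int tcpu)
{
        printf("refresh pwq slot of cpu%d\n", tcpu);
}

/* on a hotplug event for @cpu, walk its whole pod -- the same shape as
 * for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) in the patch */
static void on_cpu_hotplug(int cpu)
{
        for (int tcpu = 0; tcpu < NR_CPUS; tcpu++)
                if (cpu_pod[tcpu] == cpu_pod[cpu])
                        update_pwq_slot(tcpu);
}

int main(void)
{
        on_cpu_hotplug(2);  /* cpu2 going down also refreshes cpu3's slot */
        return 0;
}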
@@ -6904,9 +6903,8 @@ static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
         wq_affn_dfl = affn;
 
         list_for_each_entry(wq, &workqueues, list) {
-                for_each_online_cpu(cpu) {
-                        wq_update_pod(wq, cpu);
-                }
+                for_each_online_cpu(cpu)
+                        unbound_wq_update_pwq(wq, cpu);
         }
 
         mutex_unlock(&wq_pool_mutex);
@@ -7653,8 +7651,8 @@ void __init workqueue_init_early(void)
 
         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-        wq_update_pod_attrs_buf = alloc_workqueue_attrs();
-        BUG_ON(!wq_update_pod_attrs_buf);
+        unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
+        BUG_ON(!unbound_wq_update_pwq_attrs_buf);
 
         /*
          * If nohz_full is enabled, set power efficient workqueue as unbound.
@@ -7919,12 +7917,12 @@ void __init workqueue_init_topology(void)
 
         /*
          * Workqueues allocated earlier would have all CPUs sharing the default
-         * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
-         * combinations to apply per-pod sharing.
+         * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue
+         * and CPU combinations to apply per-pod sharing.
          */
         list_for_each_entry(wq, &workqueues, list) {
                 for_each_online_cpu(cpu)
-                        wq_update_pod(wq, cpu);
+                        unbound_wq_update_pwq(wq, cpu);
                 if (wq->flags & WQ_UNBOUND) {
                         mutex_lock(&wq->mutex);
                         wq_update_node_max_active(wq, -1);
