Skip to content

Commit a86feae

Browse files
committed
workqueue: Move wq_pod_init() below workqueue_init()
wq_pod_init() is called from workqueue_init() and is responsible for initializing unbound CPU pods according to NUMA node. Workqueue is in the process of improving affinity awareness and wants to use other topology information to initialize unbound CPU pods; however, unlike NUMA nodes, other topology information isn't yet available in workqueue_init().

The next patch will introduce a later-stage init function for workqueue which will be responsible for initializing unbound CPU pods. Relocate wq_pod_init() below workqueue_init(), where the new init function is going to be located, so that the diff can show the content differences.

This is just a relocation. No functional changes.

Signed-off-by: Tejun Heo <[email protected]>
1 parent fef59c9 commit a86feae

File tree

1 file changed

+40
-38
lines changed

1 file changed

+40
-38
lines changed

kernel/workqueue.c

Lines changed: 40 additions & 38 deletions
Original file line number | Diff line number | Diff line change
@@ -6256,44 +6256,7 @@ static inline void wq_watchdog_init(void) { }
62566256

62576257
#endif /* CONFIG_WQ_WATCHDOG */
62586258

6259-
/*
 * The definition of wq_pod_init() now lives below workqueue_init() (see
 * further down in this file); only the forward declaration remains here so
 * that workqueue_init() can still call it.  NOTE(review): the relocation is
 * purely textual — no functional change is intended.
 */
static void wq_pod_init(void);
62976260

62986261
/**
62996262
* workqueue_init_early - early init for workqueue subsystem
@@ -6474,6 +6437,45 @@ void __init workqueue_init(void)
64746437
wq_watchdog_init();
64756438
}
64766439

6440+
/*
 * wq_pod_init - initialize unbound CPU pods from NUMA topology
 *
 * Builds a per-node table of possible-CPU masks using cpu_to_node() and
 * publishes it through wq_pod_cpus, then flips wq_pod_enabled on.  Returns
 * early — leaving pod support disabled — on single-node systems or when any
 * possible CPU lacks a valid node mapping.
 */
static void __init wq_pod_init(void)
{
	cpumask_var_t *pod_masks;
	int nid, cpu;

	/* a single node means pods would add nothing — leave them disabled */
	if (num_possible_nodes() <= 1)
		return;

	/* every possible CPU must map to a real node, or we bail out */
	for_each_possible_cpu(cpu) {
		if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
			return;
		}
	}

	wq_update_pod_attrs_buf = alloc_workqueue_attrs();
	BUG_ON(!wq_update_pod_attrs_buf);

	/*
	 * We want masks of possible CPUs of each node which isn't readily
	 * available.  Build one from cpu_to_node() which should have been
	 * fully initialized by now.
	 */
	pod_masks = kcalloc(nr_node_ids, sizeof(pod_masks[0]), GFP_KERNEL);
	BUG_ON(!pod_masks);

	/* allocate each node's mask on that node when it is online */
	for_each_node(nid)
		BUG_ON(!zalloc_cpumask_var_node(&pod_masks[nid], GFP_KERNEL,
						node_online(nid) ? nid : NUMA_NO_NODE));

	/* populate the masks: each possible CPU joins its node's pod */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		cpumask_set_cpu(cpu, pod_masks[nid]);
	}

	wq_pod_cpus = pod_masks;
	wq_pod_enabled = true;
}
64776479
void __warn_flushing_systemwide_wq(void)
64786480
{
64796481
pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");

0 commit comments

Comments
 (0)