
Commit e7bdd7f

Ming Lei authored and KAGA-KOKO committed
genirq/affinity: Don't pass irq_affinity_desc array to irq_build_affinity_masks
Prepare for abstracting irq_build_affinity_masks() into a public function for assigning all CPUs evenly into several groups.

Don't pass the irq_affinity_desc array to irq_build_affinity_masks(); instead, return a cpumask array, storing each assigned group in one element of the array.

This makes it possible to provide a generic interface for grouping all CPUs evenly from a NUMA and CPU locality viewpoint. The cost is one extra allocation in irq_build_affinity_masks(), which should be fine since it is done via GFP_KERNEL and irq_build_affinity_masks() is a slow path anyway.

Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: John Garry <[email protected]>
Reviewed-by: Jens Axboe <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
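For orientation, here is a minimal sketch of the new calling convention, condensed from the irq_create_affinity_masks() hunk in the diff below. The helper name spread_one_set() is hypothetical and exists only to group the copy loop for readability; the real code open-codes it inside the loop over affd->nr_sets:

#include <linux/cpumask.h>
#include <linux/interrupt.h>	/* struct irq_affinity_desc */
#include <linux/slab.h>		/* kfree() */

/*
 * Consume the cpumask array returned by irq_build_affinity_masks()
 * and copy each assigned group into the corresponding
 * irq_affinity_desc slot, then drop the temporary array.
 */
static bool spread_one_set(struct irq_affinity_desc *masks,
			   unsigned int curvec, unsigned int this_vecs)
{
	struct cpumask *result = irq_build_affinity_masks(this_vecs);
	unsigned int j;

	if (!result)		/* kcalloc() or CPU spreading failed */
		return false;

	for (j = 0; j < this_vecs; j++)
		cpumask_copy(&masks[curvec + j].mask, &result[j]);

	kfree(result);		/* the group array is only a staging buffer */
	return true;
}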
1 parent 1f962d9 commit e7bdd7f

1 file changed (+24, -10 lines)


kernel/irq/affinity.c

Lines changed: 24 additions & 10 deletions
@@ -249,7 +249,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 					cpumask_var_t *node_to_cpumask,
 					const struct cpumask *cpu_mask,
 					struct cpumask *nmsk,
-					struct irq_affinity_desc *masks)
+					struct cpumask *masks)
 {
 	unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
 	unsigned int last_affv = numvecs;
@@ -270,7 +270,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 		for_each_node_mask(n, nodemsk) {
 			/* Ensure that only CPUs which are in both masks are set */
 			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
-			cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
+			cpumask_or(&masks[curvec], &masks[curvec], nmsk);
 			if (++curvec == last_affv)
 				curvec = 0;
 		}
@@ -321,7 +321,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 			 */
 			if (curvec >= last_affv)
 				curvec = 0;
-			irq_spread_init_one(&masks[curvec].mask, nmsk,
+			irq_spread_init_one(&masks[curvec], nmsk,
 						cpus_per_vec);
 		}
 		done += nv->nvectors;
@@ -335,16 +335,16 @@ static int __irq_build_affinity_masks(unsigned int startvec,
  * 1) spread present CPU on these vectors
  * 2) spread other possible CPUs on these vectors
  */
-static int irq_build_affinity_masks(unsigned int numvecs,
-				    struct irq_affinity_desc *masks)
+static struct cpumask *irq_build_affinity_masks(unsigned int numvecs)
 {
 	unsigned int curvec = 0, nr_present = 0, nr_others = 0;
 	cpumask_var_t *node_to_cpumask;
 	cpumask_var_t nmsk, npresmsk;
 	int ret = -ENOMEM;
+	struct cpumask *masks = NULL;
 
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return ret;
+		return NULL;
 
 	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
 		goto fail_nmsk;
@@ -353,6 +353,10 @@ static int irq_build_affinity_masks(unsigned int numvecs,
 	if (!node_to_cpumask)
 		goto fail_npresmsk;
 
+	masks = kcalloc(numvecs, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto fail_node_to_cpumask;
+
 	/* Stabilize the cpumasks */
 	cpus_read_lock();
 	build_node_to_cpumask(node_to_cpumask);
@@ -386,14 +390,19 @@ static int irq_build_affinity_masks(unsigned int numvecs,
 	if (ret >= 0)
 		WARN_ON(nr_present + nr_others < numvecs);
 
+ fail_node_to_cpumask:
 	free_node_to_cpumask(node_to_cpumask);
 
  fail_npresmsk:
 	free_cpumask_var(npresmsk);
 
  fail_nmsk:
 	free_cpumask_var(nmsk);
-	return ret < 0 ? ret : 0;
+	if (ret < 0) {
+		kfree(masks);
+		return NULL;
+	}
+	return masks;
 }
 
 static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
@@ -457,13 +466,18 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 	 */
 	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
 		unsigned int this_vecs = affd->set_size[i];
-		int ret;
+		int j;
+		struct cpumask *result = irq_build_affinity_masks(this_vecs);
 
-		ret = irq_build_affinity_masks(this_vecs, &masks[curvec]);
-		if (ret) {
+		if (!result) {
 			kfree(masks);
 			return NULL;
 		}
+
+		for (j = 0; j < this_vecs; j++)
+			cpumask_copy(&masks[curvec + j].mask, &result[j]);
+		kfree(result);
+
 		curvec += this_vecs;
 		usedvecs += this_vecs;
 	}
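After this change irq_build_affinity_masks() owns the array it hands back. A condensed skeleton of the new allocation and unwinding order, with the actual CPU spreading elided and the labels taken from the hunks above, looks roughly like this:

static struct cpumask *irq_build_affinity_masks(unsigned int numvecs)
{
	struct cpumask *masks = NULL;
	int ret = -ENOMEM;

	/* ... allocate nmsk, npresmsk and node_to_cpumask as before ... */

	masks = kcalloc(numvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto fail_node_to_cpumask;

	/* ... spread present and other possible CPUs into masks[], updating ret ... */

 fail_node_to_cpumask:
	/* ... free node_to_cpumask, npresmsk and nmsk in reverse order ... */
	if (ret < 0) {
		kfree(masks);	/* don't leak the group array on failure */
		return NULL;
	}
	return masks;		/* caller copies the groups out and kfree()s it */
}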
