Skip to content

Commit 67719ef

Browse files
Sebastian Andrzej Siewior authored and KAGA-KOKO committed
smp: Add a smp_cond_func_t argument to smp_call_function_many()
on_each_cpu_cond_mask() allocates a new CPU mask. The newly allocated mask is a subset of the provided mask, based on the conditional function. This memory allocation can be avoided by extending smp_call_function_many() with the conditional function and performing the remote function call based on the mask and the conditional function.

Rename smp_call_function_many() to smp_call_function_many_cond() and add the smp_cond_func_t argument. If smp_cond_func_t is provided, then it is used before invoking the function. Provide smp_call_function_many() with cond_func set to NULL. Let on_each_cpu_cond_mask() use smp_call_function_many_cond().

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 5671d81 commit 67719ef

File tree

1 file changed

+38
-43
lines changed

1 file changed

+38
-43
lines changed

kernel/smp.c

Lines changed: 38 additions & 43 deletions
Original file line number | Diff line number | Diff line change
@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
395395
}
396396
EXPORT_SYMBOL_GPL(smp_call_function_any);
397397

398-
/**
399-
* smp_call_function_many(): Run a function on a set of other CPUs.
400-
* @mask: The set of cpus to run on (only runs on online subset).
401-
* @func: The function to run. This must be fast and non-blocking.
402-
* @info: An arbitrary pointer to pass to the function.
403-
* @wait: If true, wait (atomically) until function has completed
404-
* on other CPUs.
405-
*
406-
* If @wait is true, then returns once @func has returned.
407-
*
408-
* You must not call this function with disabled interrupts or from a
409-
* hardware interrupt handler or from a bottom half handler. Preemption
410-
* must be disabled when calling this function.
411-
*/
412-
void smp_call_function_many(const struct cpumask *mask,
413-
smp_call_func_t func, void *info, bool wait)
398+
static void smp_call_function_many_cond(const struct cpumask *mask,
399+
smp_call_func_t func, void *info,
400+
bool wait, smp_cond_func_t cond_func)
414401
{
415402
struct call_function_data *cfd;
416403
int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
448435

449436
/* Fastpath: do that cpu by itself. */
450437
if (next_cpu >= nr_cpu_ids) {
451-
smp_call_function_single(cpu, func, info, wait);
438+
if (!cond_func || (cond_func && cond_func(cpu, info)))
439+
smp_call_function_single(cpu, func, info, wait);
452440
return;
453441
}
454442

@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
465453
for_each_cpu(cpu, cfd->cpumask) {
466454
call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
467455

456+
if (cond_func && !cond_func(cpu, info))
457+
continue;
458+
468459
csd_lock(csd);
469460
if (wait)
470461
csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
486477
}
487478
}
488479
}
480+
481+
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	/*
	 * Unconditional variant: passing cond_func == NULL makes
	 * smp_call_function_many_cond() IPI every online CPU in @mask.
	 */
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
490501

491502
/**
/*
 * Call @func on each CPU in @mask for which @cond_func returns true.
 * Remote CPUs are handled via smp_call_function_many_cond(); the local
 * CPU (if it is in @mask and passes @cond_func) runs @func directly
 * with interrupts disabled, mirroring the IPI-handler context.
 *
 * @gfp_flags is no longer used (no cpumask is allocated anymore) but is
 * kept so the function's interface stays unchanged for existing callers.
 *
 * NOTE(review): @cond_func is invoked unconditionally for the local CPU,
 * so callers are presumably required to pass a non-NULL predicate —
 * confirm against the call sites.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, gfp_t gfp_flags,
			   const struct cpumask *mask)
{
	/* get_cpu() disables preemption so the local CPU cannot change. */
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		/* Run locally with IRQs off, like the remote IPI path. */
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
716711

0 commit comments

Comments
 (0)