
Commit ab67f60
2 parents: e279160 + cb92315

Merge tag 'smp-core-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core SMP updates from Thomas Gleixner:
 "A small set of SMP core code changes:

   - Rework the smp function call core code to avoid the allocation of
     an additional cpumask

   - Remove the no longer required GFP argument from on_each_cpu_cond()
     and on_each_cpu_cond_mask() and fix up the callers"

* tag 'smp-core-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Remove allocation mask from on_each_cpu_cond.*()
  smp: Add a smp_cond_func_t argument to smp_call_function_many()
  smp: Use smp_cond_func_t as type for the conditional function
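The caller-visible effect of the series is that the GFP argument disappears from both conditional helpers, as the per-file diffs below show. Here is a minimal sketch of the before and after from a caller's point of view; the names cpu_needs_flush(), do_flush(), and flush_example() are hypothetical, not part of this commit:

#include <linux/smp.h>

/*
 * Hypothetical condition callback matching the new smp_cond_func_t
 * type. It must be fast and non-blocking: it decides, per CPU,
 * whether the IPI is worth sending at all.
 */
static bool cpu_needs_flush(int cpu, void *info)
{
	return true;
}

/* Runs on every CPU for which cpu_needs_flush() returned true. */
static void do_flush(void *info)
{
}

static void flush_example(void)
{
	/*
	 * Before this series, callers had to pass GFP flags for the
	 * cpumask the helper allocated internally:
	 *
	 *	on_each_cpu_cond(cpu_needs_flush, do_flush, NULL, 1,
	 *			 GFP_ATOMIC);
	 *
	 * After it, no allocation takes place and the argument is gone:
	 */
	on_each_cpu_cond(cpu_needs_flush, do_flush, NULL, 1);
}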

File tree: 6 files changed, +56 -72 lines

arch/x86/mm/tlb.c
Lines changed: 1 addition & 1 deletion

@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 				       (void *)info, 1);
 	else
 		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-				(void *)info, 1, GFP_ATOMIC, cpumask);
+				(void *)info, 1, cpumask);
 }
 
 /*

fs/buffer.c
Lines changed: 1 addition & 1 deletion

@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

include/linux/smp.h
Lines changed: 5 additions & 6 deletions

@@ -15,6 +15,7 @@
 #include <linux/llist.h>
 
 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
 struct __call_single_data {
 	struct llist_node llist;
 	smp_call_func_t func;

@@ -49,13 +50,11 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait);
 
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
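With smp_cond_func_t in place, the predicate type is written once and shared by the declarations above and by callers. A short sketch of a mask-restricted call under the new signatures; drain_pending(), drain_queue(), and drain_cpus() are hypothetical names, not from this commit:

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical predicate and worker; illustrative names only. */
static bool drain_pending(int cpu, void *info);
static void drain_queue(void *info);

static void drain_cpus(const struct cpumask *mask)
{
	/*
	 * Runs drain_queue() on every online CPU in @mask for which
	 * drain_pending() returns true, waiting for completion. Per
	 * the kernel/smp.c rework below, the calling CPU, if it is in
	 * @mask and the predicate holds, runs the function directly
	 * with interrupts disabled rather than via IPI.
	 */
	on_each_cpu_cond_mask(drain_pending, drain_queue, NULL, true, mask);
}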

kernel/smp.c
Lines changed: 43 additions & 56 deletions

@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-			    smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+					smp_call_func_t func, void *info,
+					bool wait, smp_cond_func_t cond_func)
 {
 	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();

@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		smp_call_function_single(cpu, func, info, wait);
+		if (!cond_func || (cond_func && cond_func(cpu, info)))
+			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}

@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
 	for_each_cpu(cpu, cfd->cpumask) {
 		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+		if (cond_func && !cond_func(cpu, info))
+			continue;
+
 		csd_lock(csd);
 		if (wait)
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;

@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
 		}
 	}
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+			    smp_call_func_t func, void *info, bool wait)
+{
+	smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**

@@ -668,58 +679,34 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:	An arbitrary pointer to pass to both functions.
  * @wait:	If true, wait (atomically) until function has
  *		completed on other CPUs.
- * @gfp_flags:	GFP flags to use when allocating the cpumask
- *		used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
  *
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask)
 {
-	cpumask_var_t cpus;
-	int cpu, ret;
-
-	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
-
-	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info))
-				__cpumask_set_cpu(cpu, cpus);
-		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
-		free_cpumask_var(cpus);
-	} else {
-		/*
-		 * No free cpumask, bother. No matter, we'll
-		 * just have to IPI them one by one.
-		 */
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info)) {
-				ret = smp_call_function_single(cpu, func,
-							       info, wait);
-				WARN_ON_ONCE(ret);
-			}
-		preempt_enable();
+	int cpu = get_cpu();
+
+	smp_call_function_many_cond(mask, func, info, wait, cond_func);
+	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		      smp_call_func_t func, void *info, bool wait,
-		      gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-			      cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
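One behavioural point is visible in the on_each_cpu_cond_mask() hunk above: the old implementation could sleep (it called might_sleep_if() for blocking GFP flags), while the reworked one evaluates cond_func between get_cpu() and put_cpu(), that is, with preemption disabled. A sketch of a predicate written under that constraint; the per-CPU counter is hypothetical, not from this commit:

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU state; not part of this commit. */
static DEFINE_PER_CPU(unsigned int, pending_work);

/*
 * Written as an smp_cond_func_t with the new constraint in mind: it
 * only reads per-CPU data and never sleeps, because the reworked
 * helpers call it with preemption disabled while preparing the IPIs.
 */
static bool cpu_has_pending_work(int cpu, void *info)
{
	return per_cpu(pending_work, cpu) != 0;
}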

kernel/up.c
Lines changed: 5 additions & 7 deletions

@@ -68,9 +68,8 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same conditions in UP and SMP.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			   smp_call_func_t func, void *info, bool wait,
-			   gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	unsigned long flags;

@@ -84,11 +83,10 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		      smp_call_func_t func, void *info, bool wait,
-		      gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);

mm/slub.c
Lines changed: 1 addition & 1 deletion

@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }
 
 /*
