Skip to content

Commit a5aa5ce

Browse files
anadav authored and Ingo Molnar committed
smp: Inline on_each_cpu_cond() and on_each_cpu()
Simplify the code and avoid having an additional function on the stack by inlining on_each_cpu_cond() and on_each_cpu(). Suggested-by: Peter Zijlstra <[email protected]> Signed-off-by: Nadav Amit <[email protected]> [ Minor edits. ] Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 1608e4c commit a5aa5ce

File tree

3 files changed

+37
-107
lines changed

3 files changed

+37
-107
lines changed

include/linux/smp.h

Lines changed: 36 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -50,30 +50,52 @@ extern unsigned int total_cpus;
5050
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
5151
int wait);
5252

53+
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
54+
void *info, bool wait, const struct cpumask *mask);
55+
56+
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
57+
5358
/*
5459
* Call a function on all processors
5560
*/
56-
void on_each_cpu(smp_call_func_t func, void *info, int wait);
61+
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
62+
{
63+
on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
64+
}
5765

58-
/*
59-
* Call a function on processors specified by mask, which might include
60-
* the local one.
66+
/**
67+
* on_each_cpu_mask(): Run a function on processors specified by
68+
* cpumask, which may include the local processor.
69+
* @mask: The set of cpus to run on (only runs on online subset).
70+
* @func: The function to run. This must be fast and non-blocking.
71+
* @info: An arbitrary pointer to pass to the function.
72+
* @wait: If true, wait (atomically) until function has completed
73+
* on other CPUs.
74+
*
75+
* If @wait is true, then returns once @func has returned.
76+
*
77+
* You must not call this function with disabled interrupts or from a
78+
* hardware interrupt handler or from a bottom half handler. The
79+
* exception is that it may be used during early boot while
80+
* early_boot_irqs_disabled is set.
6181
*/
62-
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
63-
void *info, bool wait);
82+
static inline void on_each_cpu_mask(const struct cpumask *mask,
83+
smp_call_func_t func, void *info, bool wait)
84+
{
85+
on_each_cpu_cond_mask(NULL, func, info, wait, mask);
86+
}
6487

6588
/*
6689
* Call a function on each processor for which the supplied function
6790
* cond_func returns a positive value. This may include the local
68-
* processor.
91+
* processor. May be used during early boot while early_boot_irqs_disabled is
92+
* set. Use local_irq_save/restore() instead of local_irq_disable/enable().
6993
*/
70-
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
71-
void *info, bool wait);
72-
73-
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
74-
void *info, bool wait, const struct cpumask *mask);
75-
76-
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
94+
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
95+
smp_call_func_t func, void *info, bool wait)
96+
{
97+
on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
98+
}
7799

78100
#ifdef CONFIG_SMP
79101

kernel/smp.c

Lines changed: 0 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -847,55 +847,6 @@ void __init smp_init(void)
847847
smp_cpus_done(setup_max_cpus);
848848
}
849849

850-
/*
851-
* Call a function on all processors. May be used during early boot while
852-
* early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
853-
* of local_irq_disable/enable().
854-
*/
855-
void on_each_cpu(smp_call_func_t func, void *info, int wait)
856-
{
857-
unsigned long flags;
858-
859-
preempt_disable();
860-
smp_call_function(func, info, wait);
861-
local_irq_save(flags);
862-
func(info);
863-
local_irq_restore(flags);
864-
preempt_enable();
865-
}
866-
EXPORT_SYMBOL(on_each_cpu);
867-
868-
/**
869-
* on_each_cpu_mask(): Run a function on processors specified by
870-
* cpumask, which may include the local processor.
871-
* @mask: The set of cpus to run on (only runs on online subset).
872-
* @func: The function to run. This must be fast and non-blocking.
873-
* @info: An arbitrary pointer to pass to the function.
874-
* @wait: If true, wait (atomically) until function has completed
875-
* on other CPUs.
876-
*
877-
* If @wait is true, then returns once @func has returned.
878-
*
879-
* You must not call this function with disabled interrupts or from a
880-
* hardware interrupt handler or from a bottom half handler. The
881-
* exception is that it may be used during early boot while
882-
* early_boot_irqs_disabled is set.
883-
*/
884-
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
885-
void *info, bool wait)
886-
{
887-
unsigned int scf_flags;
888-
889-
scf_flags = SCF_RUN_LOCAL;
890-
if (wait)
891-
scf_flags |= SCF_WAIT;
892-
893-
preempt_disable();
894-
smp_call_function_many_cond(mask, func, info, scf_flags, NULL);
895-
preempt_enable();
896-
}
897-
EXPORT_SYMBOL(on_each_cpu_mask);
898-
899850
/*
900851
* on_each_cpu_cond(): Call a function on each processor for which
901852
* the supplied function cond_func returns true, optionally waiting
@@ -932,13 +883,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
932883
}
933884
EXPORT_SYMBOL(on_each_cpu_cond_mask);
934885

935-
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
936-
void *info, bool wait)
937-
{
938-
on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
939-
}
940-
EXPORT_SYMBOL(on_each_cpu_cond);
941-
942886
static void do_nothing(void *unused)
943887
{
944888
}

kernel/up.c

Lines changed: 1 addition & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -36,35 +36,6 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
3636
}
3737
EXPORT_SYMBOL(smp_call_function_single_async);
3838

39-
void on_each_cpu(smp_call_func_t func, void *info, int wait)
40-
{
41-
unsigned long flags;
42-
43-
local_irq_save(flags);
44-
func(info);
45-
local_irq_restore(flags);
46-
}
47-
EXPORT_SYMBOL(on_each_cpu);
48-
49-
/*
50-
* Note we still need to test the mask even for UP
51-
* because we actually can get an empty mask from
52-
* code that on SMP might call us without the local
53-
* CPU in the mask.
54-
*/
55-
void on_each_cpu_mask(const struct cpumask *mask,
56-
smp_call_func_t func, void *info, bool wait)
57-
{
58-
unsigned long flags;
59-
60-
if (cpumask_test_cpu(0, mask)) {
61-
local_irq_save(flags);
62-
func(info);
63-
local_irq_restore(flags);
64-
}
65-
}
66-
EXPORT_SYMBOL(on_each_cpu_mask);
67-
6839
/*
6940
* Preemption is disabled here to make sure the cond_func is called under the
7041
* same condtions in UP and SMP.
@@ -75,7 +46,7 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
7546
unsigned long flags;
7647

7748
preempt_disable();
78-
if (cond_func(0, info)) {
49+
if ((!cond_func || cond_func(0, info)) && cpumask_test_cpu(0, mask)) {
7950
local_irq_save(flags);
8051
func(info);
8152
local_irq_restore(flags);
@@ -84,13 +55,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
8455
}
8556
EXPORT_SYMBOL(on_each_cpu_cond_mask);
8657

87-
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
88-
void *info, bool wait)
89-
{
90-
on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
91-
}
92-
EXPORT_SYMBOL(on_each_cpu_cond);
93-
9458
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
9559
{
9660
int ret;

0 commit comments

Comments (0)