Skip to content

Commit d0a7166

Browse files
committed
x86/smp: Move smp_function_call implementations into IPI code
Move it where it belongs. That allows keeping all the shorthand logic in one place. No functional change. Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 22ca7ee commit d0a7166

File tree

3 files changed

+41
-40
lines changed

3 files changed

+41
-40
lines changed

arch/x86/include/asm/smp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,7 @@ void play_dead_common(void);
143143
void wbinvd_on_cpu(int cpu);
144144
int wbinvd_on_all_cpus(void);
145145

146+
void native_smp_send_reschedule(int cpu);
146147
void native_send_call_func_ipi(const struct cpumask *mask);
147148
void native_send_call_func_single_ipi(int cpu);
148149
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

arch/x86/kernel/apic/ipi.c

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,46 @@ void apic_send_IPI_allbutself(unsigned int vector)
6262
apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
6363
}
6464

65+
/*
66+
* Send a 'reschedule' IPI to another CPU. It goes straight through and
67+
* wastes no time serializing anything. Worst case is that we lose a
68+
* reschedule ...
69+
*/
70+
void native_smp_send_reschedule(int cpu)
71+
{
72+
if (unlikely(cpu_is_offline(cpu))) {
73+
WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
74+
return;
75+
}
76+
apic->send_IPI(cpu, RESCHEDULE_VECTOR);
77+
}
78+
79+
void native_send_call_func_single_ipi(int cpu)
80+
{
81+
apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
82+
}
83+
84+
void native_send_call_func_ipi(const struct cpumask *mask)
85+
{
86+
cpumask_var_t allbutself;
87+
88+
if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
89+
apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
90+
return;
91+
}
92+
93+
cpumask_copy(allbutself, cpu_online_mask);
94+
__cpumask_clear_cpu(smp_processor_id(), allbutself);
95+
96+
if (cpumask_equal(mask, allbutself) &&
97+
cpumask_equal(cpu_online_mask, cpu_callout_mask))
98+
apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
99+
else
100+
apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
101+
102+
free_cpumask_var(allbutself);
103+
}
104+
65105
#endif /* CONFIG_SMP */
66106

67107
static inline int __prepare_ICR2(unsigned int mask)

arch/x86/kernel/smp.c

Lines changed: 0 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -115,46 +115,6 @@
115115
static atomic_t stopping_cpu = ATOMIC_INIT(-1);
116116
static bool smp_no_nmi_ipi = false;
117117

118-
/*
119-
* this function sends a 'reschedule' IPI to another CPU.
120-
* it goes straight through and wastes no time serializing
121-
* anything. Worst case is that we lose a reschedule ...
122-
*/
123-
static void native_smp_send_reschedule(int cpu)
124-
{
125-
if (unlikely(cpu_is_offline(cpu))) {
126-
WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
127-
return;
128-
}
129-
apic->send_IPI(cpu, RESCHEDULE_VECTOR);
130-
}
131-
132-
void native_send_call_func_single_ipi(int cpu)
133-
{
134-
apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
135-
}
136-
137-
void native_send_call_func_ipi(const struct cpumask *mask)
138-
{
139-
cpumask_var_t allbutself;
140-
141-
if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
142-
apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
143-
return;
144-
}
145-
146-
cpumask_copy(allbutself, cpu_online_mask);
147-
__cpumask_clear_cpu(smp_processor_id(), allbutself);
148-
149-
if (cpumask_equal(mask, allbutself) &&
150-
cpumask_equal(cpu_online_mask, cpu_callout_mask))
151-
apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
152-
else
153-
apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
154-
155-
free_cpumask_var(allbutself);
156-
}
157-
158118
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
159119
{
160120
/* We are registered on stopping cpu too, avoid spurious NMI */

0 commit comments

Comments
 (0)