12
12
#include <linux/clockchips.h>
13
13
#include <linux/interrupt.h>
14
14
#include <linux/module.h>
15
+ #include <linux/kexec.h>
15
16
#include <linux/profile.h>
16
17
#include <linux/smp.h>
17
18
#include <linux/sched.h>
22
23
#include <asm/sbi.h>
23
24
#include <asm/tlbflush.h>
24
25
#include <asm/cacheflush.h>
26
+ #include <asm/cpu_ops.h>
25
27
26
28
/* Inter-processor interrupt message types, handled in handle_IPI(). */
enum ipi_message_type {
	IPI_RESCHEDULE,		/* trigger a reschedule on the target hart */
	IPI_CALL_FUNC,		/* run a queued smp_call_function() callback */
	IPI_CPU_STOP,		/* park the target hart (normal stop path) */
	IPI_CPU_CRASH_STOP,	/* park the target hart for a crash/kexec dump */
	IPI_IRQ_WORK,		/* run pending irq_work on the target hart */
	IPI_TIMER,		/* tick broadcast to harts in deep idle */
	IPI_MAX			/* number of IPI types; not a real message */
};
@@ -71,6 +74,32 @@ static void ipi_stop(void)
71
74
wait_for_interrupt ();
72
75
}
73
76
77
#ifdef CONFIG_KEXEC_CORE
/*
 * Count of secondary harts that still have to acknowledge an
 * IPI_CPU_CRASH_STOP request; armed by crash_smp_send_stop() and
 * decremented by each hart as it parks itself.
 */
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);

/*
 * Park this secondary hart on the crash-dump path: save its register
 * state into the crash notes, acknowledge the stop request, then spin
 * with interrupts disabled so it cannot touch memory the crash kernel
 * is about to reuse.  Never returns.
 */
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	/* Record this hart's registers for the vmcore before anything else. */
	crash_save_cpu(regs, cpu);

	/* Tell the panicking hart this CPU has reached a safe state. */
	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();

#ifdef CONFIG_HOTPLUG_CPU
	/* Prefer a real hardware offline (e.g. SBI HSM stop) when available. */
	if (cpu_has_hotplug(cpu))
		cpu_ops[cpu]->cpu_stop();
#endif

	/* Fallback: park the hart in a low-power wait loop forever. */
	for (;;)
		wait_for_interrupt();
}
#else
/*
 * Without CONFIG_KEXEC_CORE no IPI_CPU_CRASH_STOP is ever sent, so this
 * path is unreachable; keep the compiler aware of that.
 */
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	unreachable();
}
#endif
74
103
static const struct riscv_ipi_ops * ipi_ops __ro_after_init ;
75
104
76
105
void riscv_set_ipi_ops (const struct riscv_ipi_ops * ops )
@@ -124,8 +153,9 @@ void arch_irq_work_raise(void)
124
153
125
154
void handle_IPI (struct pt_regs * regs )
126
155
{
127
- unsigned long * pending_ipis = & ipi_data [smp_processor_id ()].bits ;
128
- unsigned long * stats = ipi_data [smp_processor_id ()].stats ;
156
+ unsigned int cpu = smp_processor_id ();
157
+ unsigned long * pending_ipis = & ipi_data [cpu ].bits ;
158
+ unsigned long * stats = ipi_data [cpu ].stats ;
129
159
130
160
riscv_clear_ipi ();
131
161
@@ -154,6 +184,10 @@ void handle_IPI(struct pt_regs *regs)
154
184
ipi_stop ();
155
185
}
156
186
187
+ if (ops & (1 << IPI_CPU_CRASH_STOP )) {
188
+ ipi_cpu_crash_stop (cpu , get_irq_regs ());
189
+ }
190
+
157
191
if (ops & (1 << IPI_IRQ_WORK )) {
158
192
stats [IPI_IRQ_WORK ]++ ;
159
193
irq_work_run ();
/* Human-readable labels shown in /proc/interrupts, indexed by ipi_message_type. */
static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};
@@ -235,6 +270,64 @@ void smp_send_stop(void)
235
270
cpumask_pr_args (cpu_online_mask ));
236
271
}
237
272
273
#ifdef CONFIG_KEXEC_CORE
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

/*
 * Stop all other harts on the panic/crash-kexec path and wait up to one
 * second for each to acknowledge via IPI_CPU_CRASH_STOP, so that their
 * register state is saved into the crash notes before the dump kernel
 * takes over.  Safe to call more than once; only the first call acts.
 */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0)
		return;

	/* Target every online hart except the one running the panic. */
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	/* Arm the ack counter BEFORE sending the IPI so no ack is lost. */
	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));
}

/* True if any secondary hart failed to park on the crash path in time. */
bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif
330
+
238
331
/* Arch hook for the core scheduler: ask @cpu to run the scheduler. */
void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
0 commit comments