
Commit 0e15c3c

Merge tag 'riscv-for-linus-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

 - build fix for the NR_CPUS Kconfig SBI version dependency

 - fixes to early memory initialization, to fix page permissions in EFI
   and post-initmem-free

 - build fix for the VDSO, to avoid trying to profile the VDSO functions

 - fixes for kexec crash handling, to fix multi-core and interrupt
   related initialization inside the crash kernel

 - fix for a race condition when handling multiple concurrent kernel
   stack overflows

* tag 'riscv-for-linus-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: kexec: Fixup crash_smp_send_stop without multi cores
  riscv: kexec: Fixup irq controller broken in kexec crash path
  riscv: mm: Proper page permissions after initmem free
  riscv: vdso: fix section overlapping under some conditions
  riscv: fix race when vmap stack overflow
  riscv: Sync efi page table's kernel mappings before switching
  riscv: Fix NR_CPUS range conditions
2 parents 2df2adc + 39cefc5 commit 0e15c3c

File tree

11 files changed: +187 −24 lines


arch/riscv/Kconfig

Lines changed: 3 additions & 3 deletions
@@ -317,9 +317,9 @@ config SMP
 config NR_CPUS
         int "Maximum number of CPUs (2-512)"
         depends on SMP
-        range 2 512 if !SBI_V01
-        range 2 32 if SBI_V01 && 32BIT
-        range 2 64 if SBI_V01 && 64BIT
+        range 2 512 if !RISCV_SBI_V01
+        range 2 32 if RISCV_SBI_V01 && 32BIT
+        range 2 64 if RISCV_SBI_V01 && 64BIT
         default "32" if 32BIT
         default "64" if 64BIT
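
Background on why the range is tighter when RISCV_SBI_V01 is selected: the legacy SBI v0.1 calls describe their target harts with a single flat unsigned long bitmask, so only BITS_PER_LONG harts (32 on rv32, 64 on rv64) can be addressed. The sketch below is a minimal userspace illustration of that width limit; it is not kernel code and the names are made up.

#include <limits.h>
#include <stdio.h>

/* A flat unsigned long mask can describe at most BITS_PER_LONG harts,
 * which is the reason NR_CPUS is capped at 32/64 when the legacy
 * SBI v0.1 interface is in use (illustrative only). */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static int hartmask_set(unsigned long *mask, unsigned int hartid)
{
        if (hartid >= BITS_PER_LONG)
                return -1;              /* hart cannot be represented */
        *mask |= 1UL << hartid;
        return 0;
}

int main(void)
{
        unsigned long mask = 0;

        printf("representable harts: %zu\n", BITS_PER_LONG);
        return hartmask_set(&mask, 3);
}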

arch/riscv/include/asm/asm.h

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@
 #define REG_L           __REG_SEL(ld, lw)
 #define REG_S           __REG_SEL(sd, sw)
 #define REG_SC          __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ  __REG_SEL(amoswap.d.aq, amoswap.w.aq)
 #define REG_ASM         __REG_SEL(.dword, .word)
 #define SZREG           __REG_SEL(8, 4)
 #define LGREG           __REG_SEL(3, 2)

arch/riscv/include/asm/efi.h

Lines changed: 5 additions & 1 deletion
@@ -10,6 +10,7 @@
 #include <asm/mmu_context.h>
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
@@ -20,7 +21,10 @@ extern void efi_init(void);
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
-#define arch_efi_call_virt_setup()      efi_virtmap_load()
+#define arch_efi_call_virt_setup()      ({              \
+                sync_kernel_mappings(efi_mm.pgd);       \
+                efi_virtmap_load();                     \
+        })
 #define arch_efi_call_virt_teardown()   efi_virtmap_unload()
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
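
The extra sync_kernel_mappings() call covers the case where kernel mappings are created at PGD level after efi_mm's page table was set up; without re-copying the kernel half before switching, those entries would be missing while the EFI page table is live. Below is a self-contained userspace analogue of that idea; every name in it is hypothetical, and the next hunk (pgalloc.h) shows the real helper being factored out of pgd_alloc() so the EFI path can reuse it.

#include <stdio.h>
#include <string.h>

/* Userspace model of the problem the sync fixes: a secondary top-level
 * table (standing in for efi_mm's PGD) is copied from a reference table
 * once; if the reference table later gains a new "kernel" entry, the
 * secondary table is stale until its upper half is copied again. */
#define PTRS            512     /* hypothetical number of top-level entries */
#define USER_PTRS       256     /* hypothetical user/kernel split           */

typedef unsigned long entry_t;

static entry_t reference[PTRS]; /* stands in for init_mm.pgd */
static entry_t secondary[PTRS]; /* stands in for efi_mm.pgd  */

static void sync_upper_half(entry_t *dst, const entry_t *ref)
{
        memcpy(dst + USER_PTRS, ref + USER_PTRS,
               (PTRS - USER_PTRS) * sizeof(entry_t));
}

int main(void)
{
        sync_upper_half(secondary, reference);  /* initial copy at creation */

        reference[300] = 0xdeadbeef;            /* "kernel mapping" added later */
        printf("before re-sync: %#lx\n", secondary[300]);  /* still 0 */

        sync_upper_half(secondary, reference);  /* what the new setup hook does */
        printf("after re-sync:  %#lx\n", secondary[300]);  /* now visible */
        return 0;
}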

arch/riscv/include/asm/pgalloc.h

Lines changed: 8 additions & 3 deletions
@@ -127,6 +127,13 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 #define __p4d_free_tlb(tlb, p4d, addr)  p4d_free((tlb)->mm, p4d)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+        memcpy(pgd + USER_PTRS_PER_PGD,
+               init_mm.pgd + USER_PTRS_PER_PGD,
+               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
         pgd_t *pgd;
@@ -135,9 +142,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
         if (likely(pgd != NULL)) {
                 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                 /* Copy kernel mappings */
-                memcpy(pgd + USER_PTRS_PER_PGD,
-                       init_mm.pgd + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+                sync_kernel_mappings(pgd);
         }
         return pgd;
 }

arch/riscv/include/asm/smp.h

Lines changed: 3 additions & 0 deletions
@@ -50,6 +50,9 @@ void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
 /* Clear IPI for current CPU */
 void riscv_clear_ipi(void);
 
+/* Check other CPUs stop or not */
+bool smp_crash_stop_failed(void);
+
 /* Secondary hart entry */
 asmlinkage void smp_callin(void);

arch/riscv/kernel/entry.S

Lines changed: 13 additions & 0 deletions
@@ -404,6 +404,19 @@ handle_syscall_trace_exit:
 
 #ifdef CONFIG_VMAP_STACK
 handle_kernel_stack_overflow:
+        /*
+         * Takes the psuedo-spinlock for the shadow stack, in case multiple
+         * harts are concurrently overflowing their kernel stacks. We could
+         * store any value here, but since we're overflowing the kernel stack
+         * already we only have SP to use as a scratch register. So we just
+         * swap in the address of the spinlock, as that's definately non-zero.
+         *
+         * Pairs with a store_release in handle_bad_stack().
+         */
+1:      la sp, spin_shadow_stack
+        REG_AMOSWAP_AQ sp, sp, (sp)
+        bnez sp, 1b
+
         la sp, shadow_stack
         addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
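
The lock/unlock pairing this hunk describes can be modelled with C11 atomics. The sketch below is a userspace analogue with invented names, not kernel code: the taker swaps in a non-zero value with acquire ordering and spins until it reads zero, mirroring the amoswap.aq loop here, and the owner frees the lock with a plain store-release, mirroring the smp_store_release() added in traps.c further down.

#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of the shadow-stack pseudo-spinlock protocol.
 * Any non-zero value works as the "locked" token; the kernel swaps in
 * the address of the lock itself because that is the only value SP can
 * hold at that point. */
static atomic_ulong spin_shadow_stack_model;

static void take_pseudo_lock(void)
{
        while (atomic_exchange_explicit(&spin_shadow_stack_model,
                                        (unsigned long)&spin_shadow_stack_model,
                                        memory_order_acquire) != 0)
                ;       /* another "hart" owns the shadow stack: keep spinning */
}

static void release_pseudo_lock(void)
{
        atomic_store_explicit(&spin_shadow_stack_model, 0, memory_order_release);
}

int main(void)
{
        take_pseudo_lock();     /* the amoswap.aq loop in entry.S */
        /* ... use the shared shadow stack ... */
        release_pseudo_lock();  /* the smp_store_release() in traps.c */
        puts("pseudo-spinlock handoff ok");
        return 0;
}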

arch/riscv/kernel/machine_kexec.c

Lines changed: 35 additions & 11 deletions
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>     /* For unreachable() */
 #include <linux/cpu.h>          /* For cpu_down() */
 #include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 
 /*
  * kexec_image_info - Print received image details
@@ -138,20 +140,35 @@ void machine_shutdown(void)
 #endif
 }
 
-/* Override the weak function in kernel/panic.c */
-void crash_smp_send_stop(void)
+static void machine_kexec_mask_interrupts(void)
 {
-        static int cpus_stopped;
+        unsigned int i;
+        struct irq_desc *desc;
 
-        /*
-         * This function can be called twice in panic path, but obviously
-         * we execute this only once.
-         */
-        if (cpus_stopped)
-                return;
+        for_each_irq_desc(i, desc) {
+                struct irq_chip *chip;
+                int ret;
+
+                chip = irq_desc_get_chip(desc);
+                if (!chip)
+                        continue;
+
+                /*
+                 * First try to remove the active state. If this
+                 * fails, try to EOI the interrupt.
+                 */
+                ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+                if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+                    chip->irq_eoi)
+                        chip->irq_eoi(&desc->irq_data);
 
-        smp_send_stop();
-        cpus_stopped = 1;
+                if (chip->irq_mask)
+                        chip->irq_mask(&desc->irq_data);
+
+                if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+                        chip->irq_disable(&desc->irq_data);
+        }
 }
 
 /*
@@ -169,6 +186,8 @@ machine_crash_shutdown(struct pt_regs *regs)
         crash_smp_send_stop();
 
         crash_save_cpu(regs, smp_processor_id());
+        machine_kexec_mask_interrupts();
+
         pr_info("Starting crashdump kernel...\n");
 }
 
@@ -195,6 +214,11 @@ machine_kexec(struct kimage *image)
         void *control_code_buffer = page_address(image->control_code_page);
         riscv_kexec_method kexec_method = NULL;
 
+#ifdef CONFIG_SMP
+        WARN(smp_crash_stop_failed(),
+             "Some CPUs may be stale, kdump will be unreliable.\n");
+#endif
+
         if (image->type != KEXEC_TYPE_CRASH)
                 kexec_method = control_code_buffer;
         else

arch/riscv/kernel/setup.c

Lines changed: 5 additions & 4 deletions
@@ -322,10 +322,11 @@ subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
-        if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-                set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
-                                  IS_ENABLED(CONFIG_64BIT) ?
-                                        set_memory_rw : set_memory_rw_nx);
+        if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
+                set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
+                if (IS_ENABLED(CONFIG_64BIT))
+                        set_kernel_memory(__init_begin, __init_end, set_memory_nx);
+        }
 
         free_initmem_default(POISON_FREE_INITMEM);
 }

arch/riscv/kernel/smp.c

Lines changed: 95 additions & 2 deletions
@@ -12,6 +12,7 @@
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kexec.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
@@ -22,11 +23,13 @@
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
 
 enum ipi_message_type {
         IPI_RESCHEDULE,
         IPI_CALL_FUNC,
         IPI_CPU_STOP,
+        IPI_CPU_CRASH_STOP,
         IPI_IRQ_WORK,
         IPI_TIMER,
         IPI_MAX
@@ -71,6 +74,32 @@ static void ipi_stop(void)
                 wait_for_interrupt();
 }
 
+#ifdef CONFIG_KEXEC_CORE
+static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
+
+static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+        crash_save_cpu(regs, cpu);
+
+        atomic_dec(&waiting_for_crash_ipi);
+
+        local_irq_disable();
+
+#ifdef CONFIG_HOTPLUG_CPU
+        if (cpu_has_hotplug(cpu))
+                cpu_ops[cpu]->cpu_stop();
+#endif
+
+        for(;;)
+                wait_for_interrupt();
+}
+#else
+static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+        unreachable();
+}
+#endif
+
 static const struct riscv_ipi_ops *ipi_ops __ro_after_init;
 
 void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
@@ -124,8 +153,9 @@ void arch_irq_work_raise(void)
 
 void handle_IPI(struct pt_regs *regs)
 {
-        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
-        unsigned long *stats = ipi_data[smp_processor_id()].stats;
+        unsigned int cpu = smp_processor_id();
+        unsigned long *pending_ipis = &ipi_data[cpu].bits;
+        unsigned long *stats = ipi_data[cpu].stats;
 
         riscv_clear_ipi();
 
@@ -154,6 +184,10 @@ void handle_IPI(struct pt_regs *regs)
                         ipi_stop();
                 }
 
+                if (ops & (1 << IPI_CPU_CRASH_STOP)) {
+                        ipi_cpu_crash_stop(cpu, get_irq_regs());
+                }
+
                 if (ops & (1 << IPI_IRQ_WORK)) {
                         stats[IPI_IRQ_WORK]++;
                         irq_work_run();
@@ -176,6 +210,7 @@ static const char * const ipi_names[] = {
         [IPI_RESCHEDULE] = "Rescheduling interrupts",
         [IPI_CALL_FUNC] = "Function call interrupts",
         [IPI_CPU_STOP] = "CPU stop interrupts",
+        [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
         [IPI_IRQ_WORK] = "IRQ work interrupts",
         [IPI_TIMER] = "Timer broadcast interrupts",
 };
@@ -235,6 +270,64 @@ void smp_send_stop(void)
                            cpumask_pr_args(cpu_online_mask));
 }
 
+#ifdef CONFIG_KEXEC_CORE
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+        unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+        return num_online_cpus() - this_cpu_online;
+}
+
+void crash_smp_send_stop(void)
+{
+        static int cpus_stopped;
+        cpumask_t mask;
+        unsigned long timeout;
+
+        /*
+         * This function can be called twice in panic path, but obviously
+         * we execute this only once.
+         */
+        if (cpus_stopped)
+                return;
+
+        cpus_stopped = 1;
+
+        /*
+         * If this cpu is the only one alive at this point in time, online or
+         * not, there are no stop messages to be sent around, so just back out.
+         */
+        if (num_other_online_cpus() == 0)
+                return;
+
+        cpumask_copy(&mask, cpu_online_mask);
+        cpumask_clear_cpu(smp_processor_id(), &mask);
+
+        atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
+
+        pr_crit("SMP: stopping secondary CPUs\n");
+        send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);
+
+        /* Wait up to one second for other CPUs to stop */
+        timeout = USEC_PER_SEC;
+        while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+                udelay(1);
+
+        if (atomic_read(&waiting_for_crash_ipi) > 0)
+                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+                        cpumask_pr_args(&mask));
+}
+
+bool smp_crash_stop_failed(void)
+{
+        return (atomic_read(&waiting_for_crash_ipi) > 0);
+}
+#endif
+
 void smp_send_reschedule(int cpu)
 {
         send_ipi_single(cpu, IPI_RESCHEDULE);
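
crash_smp_send_stop() is a simple rendezvous: set waiting_for_crash_ipi to the number of other CPUs, IPI them with IPI_CPU_CRASH_STOP, and poll the counter with a bounded wait; machine_kexec() later checks smp_crash_stop_failed() to warn if any CPU never reported in. Below is a runnable userspace analogue of that pattern, illustrative only: pthreads stand in for harts and every name is made up.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* The "panicking CPU" waits for NR_SECONDARIES decrements with a bounded
 * poll, mirroring waiting_for_crash_ipi in the hunk above. */
#define NR_SECONDARIES  3

static atomic_int waiting = NR_SECONDARIES;

static void *secondary(void *arg)
{
        (void)arg;
        /* ...the real handler saves registers and masks interrupts here... */
        atomic_fetch_sub(&waiting, 1);  /* report "I have stopped" */
        return NULL;                    /* the real handler spins in WFI forever */
}

int main(void)
{
        pthread_t t[NR_SECONDARIES];
        long timeout = 1000000;         /* ~1 second, like USEC_PER_SEC */

        for (int i = 0; i < NR_SECONDARIES; i++)
                pthread_create(&t[i], NULL, secondary, NULL);

        while (atomic_load(&waiting) > 0 && timeout--)
                usleep(1);              /* udelay(1) in the kernel version */

        printf(atomic_load(&waiting) > 0 ?
               "some CPUs failed to stop\n" : "all CPUs stopped\n");

        for (int i = 0; i < NR_SECONDARIES; i++)
                pthread_join(t[i], NULL);
        return 0;
}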

arch/riscv/kernel/traps.c

Lines changed: 18 additions & 0 deletions
@@ -221,11 +221,29 @@ asmlinkage unsigned long get_overflow_stack(void)
                 OVERFLOW_STACK_SIZE;
 }
 
+/*
+ * A pseudo spinlock to protect the shadow stack from being used by multiple
+ * harts concurrently. This isn't a real spinlock because the lock side must
+ * be taken without a valid stack and only a single register, it's only taken
+ * while in the process of panicing anyway so the performance and error
+ * checking a proper spinlock gives us doesn't matter.
+ */
+unsigned long spin_shadow_stack;
+
 asmlinkage void handle_bad_stack(struct pt_regs *regs)
 {
         unsigned long tsk_stk = (unsigned long)current->stack;
         unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
 
+        /*
+         * We're done with the shadow stack by this point, as we're on the
+         * overflow stack. Tell any other concurrent overflowing harts that
+         * they can proceed with panicing by releasing the pseudo-spinlock.
+         *
+         * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+         */
+        smp_store_release(&spin_shadow_stack, 0);
+
         console_verbose();
 
         pr_emerg("Insufficient stack space to handle exception!\n");
