Skip to content

Commit 26d7bd4

Browse files
Nicolas Pitre authored and carlescufi committed
riscv: decouple the Zephyr CPU number from the hart ID
Currently it is assumed that Zephyr CPU numbers match their hartid value one for one. This assumption was relied upon to efficiently retrieve the current CPU's `struct _cpu` pointer. People are starting to have systems with a mix of different usage for each CPU and such assumption may no longer be true. Let's completely decouple the hartid from the Zephyr CPU number by stuffing each CPU's `struct _cpu` pointer in their respective scratch register instead. `arch_curr_cpu()` becomes more efficient as well. Since the scratch register was previously used to store userspace's exception stack pointer, that is now moved into `struct _cpu_arch` which implied minor user space entry code cleanup and rationalization. Signed-off-by: Nicolas Pitre <[email protected]>
1 parent 96a65e2 commit 26d7bd4

File tree

9 files changed

+106
-75
lines changed

9 files changed

+106
-75
lines changed

arch/riscv/core/isr.S

Lines changed: 55 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -63,15 +63,13 @@
6363
RV_I( op a7, __z_arch_esf_t_a7_OFFSET(sp) );\
6464
RV_E( op ra, __z_arch_esf_t_ra_OFFSET(sp) )
6565

66-
#ifdef CONFIG_SMP
67-
#define GET_CURRENT_CPU(dst, tmp) \
68-
csrr tmp, mhartid ;\
69-
la dst, _kernel + ___kernel_t_cpus_OFFSET ;\
70-
shiftmul_add dst, tmp, ___cpu_t_SIZEOF
66+
.macro get_current_cpu dst
67+
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
68+
csrr \dst, mscratch
7169
#else
72-
#define GET_CURRENT_CPU(dst, tmp) \
73-
la dst, _kernel + ___kernel_t_cpus_OFFSET
70+
la \dst, _kernel + ___kernel_t_cpus_OFFSET
7471
#endif
72+
.endm
7573

7674
/* imports */
7775
GDATA(_sw_isr_table)
@@ -129,45 +127,27 @@ GTEXT(_isr_wrapper)
129127
SECTION_FUNC(exception.entry, _isr_wrapper)
130128

131129
#ifdef CONFIG_USERSPACE
132-
/*
133-
* The scratch register contains either the privileged stack pointer
134-
* to use when interrupting a user mode thread, or 0 when interrupting
135-
* kernel mode in which case the current stack should be used.
136-
*/
137-
csrrw sp, mscratch, sp
138-
bnez sp, 1f
130+
/* retrieve address of _current_cpu preserving s0 */
131+
csrrw s0, mscratch, s0
139132

140-
/* restore privileged stack pointer and zero the scratch reg */
141-
csrrw sp, mscratch, sp
142-
1:
143-
#endif
133+
/* preserve t0 and t1 temporarily */
134+
sr t0, _curr_cpu_arch_user_exc_tmp0(s0)
135+
sr t1, _curr_cpu_arch_user_exc_tmp1(s0)
144136

145-
#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
146-
SOC_ISR_SW_STACKING
147-
#else
148-
/* Save caller-saved registers on current thread stack. */
149-
addi sp, sp, -__z_arch_esf_t_SIZEOF
150-
DO_CALLER_SAVED(sr) ;
151-
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
152-
153-
/* Save s0 in the esf and load it with &_current_cpu. */
154-
sr s0, __z_arch_esf_t_s0_OFFSET(sp)
155-
GET_CURRENT_CPU(s0, t0)
156-
157-
#ifdef CONFIG_USERSPACE
158-
/*
159-
* The scratch register now contains either the user mode stack
160-
* pointer, or 0 if entered from kernel mode. Retrieve that value
161-
* and zero the scratch register as we are in kernel mode now.
162-
*/
163-
csrrw t0, mscratch, zero
137+
/* determine if we come from user space */
138+
csrr t0, mstatus
139+
li t1, MSTATUS_MPP
140+
and t0, t0, t1
164141
bnez t0, 1f
165142

166-
/* came from kernel mode: adjust stack value */
167-
add t0, sp, __z_arch_esf_t_SIZEOF
168-
1:
169-
/* save stack value to be restored later */
170-
sr t0, __z_arch_esf_t_sp_OFFSET(sp)
143+
/* in user space we were: switch to our privileged stack */
144+
mv t0, sp
145+
lr sp, _curr_cpu_arch_user_exc_sp(s0)
146+
147+
/* Save user stack value. Coming from user space, we know this
148+
* can't overflow the privileged stack. The esf will be allocated
149+
* later but it is safe to store our saved user sp here. */
150+
sr t0, (-__z_arch_esf_t_SIZEOF + __z_arch_esf_t_sp_OFFSET)(sp)
171151

172152
/* Make sure tls pointer is sane */
173153
lr t0, ___cpu_t_current_OFFSET(s0)
@@ -177,8 +157,27 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
177157
lui t0, %tprel_hi(is_user_mode)
178158
add t0, t0, tp, %tprel_add(is_user_mode)
179159
sb zero, %tprel_lo(is_user_mode)(t0)
160+
1:
161+
/* retrieve original t0/t1 values */
162+
lr t0, _curr_cpu_arch_user_exc_tmp0(s0)
163+
lr t1, _curr_cpu_arch_user_exc_tmp1(s0)
164+
165+
/* retrieve original s0 and restore _current_cpu in mscratch */
166+
csrrw s0, mscratch, s0
180167
#endif
181168

169+
#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
170+
SOC_ISR_SW_STACKING
171+
#else
172+
/* Save caller-saved registers on current thread stack. */
173+
addi sp, sp, -__z_arch_esf_t_SIZEOF
174+
DO_CALLER_SAVED(sr) ;
175+
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
176+
177+
/* Save s0 in the esf and load it with &_current_cpu. */
178+
sr s0, __z_arch_esf_t_s0_OFFSET(sp)
179+
get_current_cpu s0
180+
182181
/* Save MEPC register */
183182
csrr t0, mepc
184183
sr t0, __z_arch_esf_t_mepc_OFFSET(sp)
@@ -531,7 +530,7 @@ z_riscv_thread_start:
531530
might_have_rescheduled:
532531
#ifdef CONFIG_SMP
533532
/* reload s0 with &_current_cpu as it might have changed */
534-
GET_CURRENT_CPU(s0, t0)
533+
get_current_cpu s0
535534
#endif
536535

537536
no_reschedule:
@@ -572,8 +571,8 @@ no_fp: /* make sure this is reflected in the restored mstatus */
572571
#ifdef CONFIG_USERSPACE
573572
/*
574573
* Check if we are returning to user mode. If so then we must
575-
* set is_user_mode to true and load the scratch register with
576-
* the stack pointer to be used with the next exception to come.
574+
* set is_user_mode to true and preserve our kernel mode stack for
575+
* the next exception to come.
577576
*/
578577
li t1, MSTATUS_MPP
579578
and t0, t2, t1
@@ -591,10 +590,19 @@ no_fp: /* make sure this is reflected in the restored mstatus */
591590
add t0, t0, tp, %tprel_add(is_user_mode)
592591
sb t1, %tprel_lo(is_user_mode)(t0)
593592

594-
/* load scratch reg with stack pointer for next exception entry */
593+
/* preserve stack pointer for next exception entry */
595594
add t0, sp, __z_arch_esf_t_SIZEOF
596-
csrw mscratch, t0
595+
sr t0, _curr_cpu_arch_user_exc_sp(s0)
596+
597+
j 2f
597598
1:
599+
/*
600+
* We are returning to kernel mode. Store the stack pointer to
601+
* be re-loaded further down.
602+
*/
603+
addi t0, sp, __z_arch_esf_t_SIZEOF
604+
sr t0, __z_arch_esf_t_sp_OFFSET(sp)
605+
2:
598606
#endif
599607

600608
/* Restore s0 (it is no longer ours) */

arch/riscv/core/offsets/offsets.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,4 +121,10 @@ GEN_SOC_OFFSET_SYMS();
121121

122122
GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
123123

124+
#ifdef CONFIG_USERSPACE
125+
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_sp);
126+
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_tmp0);
127+
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_tmp1);
128+
#endif
129+
124130
GEN_ABS_SYM_END

arch/riscv/core/smp.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
3333

3434
void z_riscv_secondary_cpu_init(int cpu_num)
3535
{
36+
csr_write(mscratch, &_kernel.cpus[cpu_num]);
3637
#ifdef CONFIG_THREAD_LOCAL_STORAGE
3738
__asm__("mv tp, %0" : : "r" (z_idle_threads[cpu_num].tls));
3839
#endif

arch/riscv/core/thread.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,9 +80,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
8080
/* Clear user thread context */
8181
z_riscv_pmp_usermode_init(thread);
8282
thread->arch.priv_stack_start = 0;
83-
84-
/* the unwound stack pointer upon exiting exception */
85-
stack_init->sp = (unsigned long)(stack_init + 1);
8683
#endif /* CONFIG_USERSPACE */
8784

8885
/* Assign thread entry point and mstatus.MPRV mode. */
@@ -242,8 +239,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
242239
z_riscv_pmp_usermode_prepare(_current);
243240
z_riscv_pmp_usermode_enable(_current);
244241

245-
/* exception stack has to be in mscratch */
246-
csr_write(mscratch, top_of_priv_stack);
242+
/* preserve stack pointer for next exception entry */
243+
arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;
247244

248245
is_user_mode = true;
249246

arch/riscv/include/kernel_arch_func.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,8 @@ static ALWAYS_INLINE void arch_kernel_init(void)
2929
#ifdef CONFIG_THREAD_LOCAL_STORAGE
3030
__asm__ volatile ("li tp, 0");
3131
#endif
32-
#ifdef CONFIG_USERSPACE
33-
csr_write(mscratch, 0);
32+
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
33+
csr_write(mscratch, &_kernel.cpus[0]);
3434
#endif
3535
#ifdef CONFIG_RISCV_PMP
3636
z_riscv_pmp_init();

arch/riscv/include/offsets_short_arch.h

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -9,14 +9,6 @@
99

1010
#include <offsets.h>
1111

12-
/* kernel */
13-
14-
/* nothing for now */
15-
16-
/* end - kernel */
17-
18-
/* threads */
19-
2012
#define _thread_offset_to_sp \
2113
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
2214

@@ -109,12 +101,22 @@
109101
#endif /* defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) */
110102

111103
#ifdef CONFIG_USERSPACE
104+
112105
#define _thread_offset_to_priv_stack_start \
113106
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
107+
114108
#define _thread_offset_to_user_sp \
115109
(___thread_t_arch_OFFSET + ___thread_arch_t_user_sp_OFFSET)
116-
#endif
117110

118-
/* end - threads */
111+
#define _curr_cpu_arch_user_exc_sp \
112+
(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_sp_OFFSET)
113+
114+
#define _curr_cpu_arch_user_exc_tmp0 \
115+
(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_tmp0_OFFSET)
116+
117+
#define _curr_cpu_arch_user_exc_tmp1 \
118+
(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_tmp1_OFFSET)
119+
120+
#endif
119121

120122
#endif /* ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_ */

include/zephyr/arch/riscv/arch_inlines.h

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10,24 +10,20 @@
1010
#ifndef _ASMLANGUAGE
1111

1212
#include <zephyr/kernel_structs.h>
13+
#include "csr.h"
1314

1415
static ALWAYS_INLINE uint32_t arch_proc_id(void)
1516
{
16-
uint32_t hartid;
17-
18-
#ifdef CONFIG_SMP
19-
__asm__ volatile("csrr %0, mhartid" : "=r" (hartid));
20-
#else
21-
hartid = 0;
22-
#endif
23-
24-
return hartid;
17+
return csr_read(mhartid);
2518
}
2619

2720
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
2821
{
29-
/* linear hartid enumeration space assumed */
30-
return &_kernel.cpus[arch_proc_id()];
22+
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
23+
return (_cpu_t *)csr_read(mscratch);
24+
#else
25+
return &_kernel.cpus[0];
26+
#endif
3127
}
3228

3329
static ALWAYS_INLINE unsigned int arch_num_cpus(void)

include/zephyr/arch/riscv/structs.h

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
/*
2+
* Copyright (c) BayLibre SAS
3+
*
4+
* SPDX-License-Identifier: Apache-2.0
5+
*/
6+
7+
#ifndef ZEPHYR_INCLUDE_RISCV_STRUCTS_H_
8+
#define ZEPHYR_INCLUDE_RISCV_STRUCTS_H_
9+
10+
/* Per CPU architecture specifics */
11+
struct _cpu_arch {
12+
#ifdef CONFIG_USERSPACE
13+
unsigned long user_exc_sp;
14+
unsigned long user_exc_tmp0;
15+
unsigned long user_exc_tmp1;
16+
#endif
17+
};
18+
19+
#endif /* ZEPHYR_INCLUDE_RISCV_STRUCTS_H_ */

include/zephyr/arch/structs.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,8 @@
2525

2626
#if defined(CONFIG_ARM64)
2727
#include <zephyr/arch/arm64/structs.h>
28+
#elif defined(CONFIG_RISCV)
29+
#include <zephyr/arch/riscv/structs.h>
2830
#else
2931

3032
/* Default definitions when no architecture specific definitions exist. */

0 commit comments

Comments (0)