
Commit 13dd5cb

wearyzendkalowsk authored and committed
arch: arm: start threads on privileged stack
Use the privileged stack when starting K_USER threads in arch_new_thread(). Threads entering user mode with k_thread_user_mode_enter() keep their existing flow. To support both cases, z_arm_userspace_enter() now takes an internal ABI flag (sp_is_priv) indicating whether PSP already points to the privileged stack.

Also fix calculation of the privileged stack top: use priv_stack_end directly instead of priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE, which failed to account for guard/FPU offsets applied to priv_stack_start.

Signed-off-by: Sudan Landge <[email protected]>

(cherry picked from commit 0438b9f)
1 parent be61610 commit 13dd5cb
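The stack-top fix described above comes down to ordering: priv_stack_end must be captured before priv_stack_start is bumped past the MPU guard. A minimal standalone C sketch of the difference (illustrative addresses and guard size only, not kernel code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CONFIG_PRIVILEGED_STACK_SIZE 1024u
#define MPU_GUARD_ALIGN_AND_SIZE       64u  /* illustrative guard size */

int main(void)
{
	/* Pretend this is what z_priv_stack_find() returned. */
	uint32_t priv_stack_start = 0x20001000u;

	/* Fixed behaviour: record the stack top before any guard adjustment. */
	uint32_t priv_stack_end = priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;

	/* The MPU guard is carved out of the bottom of the privileged stack. */
	priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;

	/* Old behaviour: recomputing from the adjusted start lands past the stack object. */
	uint32_t buggy_end = priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;

	printf("correct top: 0x%08" PRIx32 "\n", priv_stack_end); /* 0x20001400 */
	printf("buggy top:   0x%08" PRIx32 "\n", buggy_end);      /* 0x20001440 */
	return 0;
}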

File tree

8 files changed, +149 -118 lines changed


arch/arm/core/cortex_a_r/thread.c

Lines changed: 50 additions & 23 deletions
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
  * Copyright (c) 2021 Lexmark International, Inc.
+ * Copyright 2025 Arm Limited and/or its affiliates <[email protected]>
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -37,6 +38,34 @@
  */
 #define DEFAULT_EXC_RETURN 0xFD;
 
+#ifdef CONFIG_USERSPACE
+static void setup_priv_stack(struct k_thread *thread)
+{
+	/* Set up privileged stack before entering user mode */
+	thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);
+
+	/* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
+	 * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
+	 * adjusting priv_stack_start for the mpu guard alignment
+	 */
+	thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
+
+#if defined(CONFIG_MPU_STACK_GUARD)
+	/* Stack guard area reserved at the bottom of the thread's
+	 * privileged stack. Adjust the available (writable) stack
+	 * buffer area accordingly.
+	 */
+#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
+	thread->arch.priv_stack_start +=
+		((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
+#else
+	thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
+#endif /* CONFIG_MPU_STACK_GUARD */
+}
+#endif
+
 /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
  * end of the stack, and thus reusable by the stack when not needed anymore.
  *
@@ -80,7 +109,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 
 	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
 #if defined(CONFIG_USERSPACE)
+	thread->arch.priv_stack_start = 0;
 	if ((thread->base.user_options & K_USER) != 0) {
+		setup_priv_stack(thread);
+		iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, thread->arch.priv_stack_end);
 		iframe->pc = (uint32_t)arch_user_mode_enter;
 	} else {
 		iframe->pc = (uint32_t)z_thread_entry;
@@ -122,9 +154,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
 	}
 #endif
-#if defined(CONFIG_USERSPACE)
-	thread->arch.priv_stack_start = 0;
-#endif
 #endif
 	/*
 	 * initial values in all other registers/thread entries are
@@ -196,10 +225,8 @@ static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
 {
+	uint32_t sp_is_priv = 1;
 
-	/* Set up privileged stack before entering user mode */
-	_current->arch.priv_stack_start =
-		(uint32_t)z_priv_stack_find(_current->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_THREAD_STACK_INFO)
 	/* We're dropping to user mode which means the guard area is no
@@ -216,29 +243,29 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
 	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_THREAD_STACK_INFO */
-
-	/* Stack guard area reserved at the bottom of the thread's
-	 * privileged stack. Adjust the available (writable) stack
-	 * buffer area accordingly.
-	 */
-#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-	_current->arch.priv_stack_start +=
-		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
-		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
-#else
-	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
-#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
 #endif /* CONFIG_MPU_STACK_GUARD */
 
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
-	_current->arch.priv_stack_end =
-		_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
-#endif
+	/* 2 ways how arch_user_mode_enter is called:
+	 * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
+	 *   already setup and stack pointer points to privileged stack.
+	 * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
+	 *   setup and stack pointer points to user stack.
+	 *
+	 * When called from k_thread_user_mode_enter, we need to check and setup the privileged
+	 * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
+	 * Note that we do not change the PSP in this function to avoid any conflict with compiler's
	 * sequence which has already pushed stuff on the user stack.
+	 */
+	if (0 == _current->arch.priv_stack_start) {
+		setup_priv_stack(_current);
+		sp_is_priv = 0;
+	}
 
 	z_arm_userspace_enter(user_entry, p1, p2, p3,
 			      (uint32_t)_current->stack_info.start,
 			      _current->stack_info.size -
-				      _current->stack_info.delta);
+				      _current->stack_info.delta,
+			      sp_is_priv);
 	CODE_UNREACHABLE;
 }
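The sp_is_priv flag added above is an internal ABI detail for z_arm_userspace_enter(), whose assembly implementation is not part of this excerpt. A conceptual sketch of how the flag is meant to be interpreted, with a made-up helper name and plain C standing in for the assembly:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* pick_initial_sp() is an invented name for illustration; the real logic lives in assembly. */
static uint32_t pick_initial_sp(uint32_t priv_stack_end, uint32_t current_sp,
				uint32_t sp_is_priv)
{
	if (sp_is_priv == 0U) {
		/* Came via k_thread_user_mode_enter(): SP still points to the user
		 * stack, so continue on top of the freshly set up privileged stack.
		 */
		return priv_stack_end;
	}
	/* Came via the arch_new_thread() path: SP is already on the privileged stack. */
	return current_sp;
}

int main(void)
{
	printf("0x%08" PRIx32 "\n", pick_initial_sp(0x20001400u, 0x20008000u, 0u)); /* switches */
	printf("0x%08" PRIx32 "\n", pick_initial_sp(0x20001400u, 0x200013C0u, 1u)); /* keeps SP */
	return 0;
}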

arch/arm/core/cortex_m/thread.c

Lines changed: 49 additions & 18 deletions
@@ -45,6 +45,35 @@
 K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
 #endif
 
+#ifdef CONFIG_USERSPACE
+static void setup_priv_stack(struct k_thread *thread)
+{
+	/* Set up privileged stack before entering user mode */
+	thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);
+
+	/* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
+	 * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
+	 * adjusting priv_stack_start for the mpu guard alignment
+	 */
+	thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
+
+#if defined(CONFIG_MPU_STACK_GUARD)
+	/* Stack guard area reserved at the bottom of the thread's
+	 * privileged stack. Adjust the available (writable) stack
+	 * buffer area accordingly.
+	 */
+#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
+	thread->arch.priv_stack_start +=
+		((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
+			? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
+			: MPU_GUARD_ALIGN_AND_SIZE;
+#else
+	thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
+#endif /* CONFIG_MPU_STACK_GUARD */
+}
+#endif
+
 /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
  * end of the stack, and thus reusable by the stack when not needed anymore.
  *
@@ -87,7 +116,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta
 
 	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
 #if defined(CONFIG_USERSPACE)
+	thread->arch.priv_stack_start = 0;
 	if ((thread->base.user_options & K_USER) != 0) {
+		setup_priv_stack(thread);
+		iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, thread->arch.priv_stack_end);
 		iframe->pc = (uint32_t)arch_user_mode_enter;
 	} else {
 		iframe->pc = (uint32_t)z_thread_entry;
@@ -118,9 +150,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta
 		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
 	}
 #endif
-#if defined(CONFIG_USERSPACE)
-	thread->arch.priv_stack_start = 0;
-#endif
 #endif
 	/*
 	 * initial values in all other registers/thread entries are
@@ -215,9 +244,8 @@ uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
 #ifdef CONFIG_USERSPACE
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3)
 {
+	uint32_t sp_is_priv = 1;
 
-	/* Set up privileged stack before entering user mode */
-	_current->arch.priv_stack_start = (uint32_t)z_priv_stack_find(_current->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_THREAD_STACK_INFO)
 	/* We're dropping to user mode which means the guard area is no
@@ -234,23 +262,26 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, v
 	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
 	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_THREAD_STACK_INFO */
+#endif /* CONFIG_MPU_STACK_GUARD */
 
-	/* Stack guard area reserved at the bottom of the thread's
-	 * privileged stack. Adjust the available (writable) stack
-	 * buffer area accordingly.
+	/* 2 ways how arch_user_mode_enter is called:
+	 * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
+	 *   already setup and stack pointer points to privileged stack.
+	 * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
+	 *   setup and stack pointer points to user stack.
+	 *
+	 * When called from k_thread_user_mode_enter, we need to check and setup the privileged
+	 * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
+	 * Note that we do not change the PSP in this function to avoid any conflict with compiler's
+	 * sequence which has already pushed stuff on the user stack.
 	 */
-#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-	_current->arch.priv_stack_start +=
-		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
-			? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
-			: MPU_GUARD_ALIGN_AND_SIZE;
-#else
-	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
-#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
-#endif /* CONFIG_MPU_STACK_GUARD */
+	if (0 == _current->arch.priv_stack_start) {
+		setup_priv_stack(_current);
+		sp_is_priv = 0;
+	}
 
 	z_arm_userspace_enter(user_entry, p1, p2, p3, (uint32_t)_current->stack_info.start,
-			      _current->stack_info.size - _current->stack_info.delta);
+			      _current->stack_info.size - _current->stack_info.delta, sp_is_priv);
 	CODE_UNREACHABLE;
 }
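For context, the two call paths described in the comment above correspond to standard Zephyr usage roughly as follows (application-side sketch assuming CONFIG_USERSPACE is enabled; names, stack size, and priority are arbitrary and not part of the commit):

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(app_stack, 1024);
static struct k_thread app_thread;

static void user_entry(void *p1, void *p2, void *p3)
{
	/* Runs in user mode on the user stack. */
}

void supervisor_entry(void *p1, void *p2, void *p3)
{
	/* Path 2: the thread started privileged and now drops to user mode.
	 * arch_user_mode_enter() runs with the user stack active, so it sets up
	 * the privileged stack itself and passes sp_is_priv = 0.
	 */
	k_thread_user_mode_enter(user_entry, p1, p2, p3);
}

void spawn_user_thread(void)
{
	/* Path 1: a K_USER thread. arch_new_thread() now builds the initial frame
	 * on the privileged stack, so arch_user_mode_enter() later runs on that
	 * stack already and passes sp_is_priv = 1.
	 */
	k_thread_create(&app_thread, app_stack, K_THREAD_STACK_SIZEOF(app_stack),
			user_entry, NULL, NULL, NULL, 5, K_USER, K_NO_WAIT);
}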

arch/arm/core/offsets/offsets_aarch32.c

Lines changed: 2 additions & 1 deletion
@@ -45,8 +45,9 @@ GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return);
 #endif
 #if defined(CONFIG_USERSPACE)
 GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
 GEN_OFFSET_SYM(_thread_arch_t, priv_stack_end);
+
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
 GEN_OFFSET_SYM(_thread_arch_t, sp_usr);
 #endif
 #endif
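GEN_OFFSET_SYM() exports a struct member's byte offset as a symbol that assembly code can use; lifting priv_stack_end out of the Cortex-R-only block presumably makes that offset available to the userspace entry code on the other AArch32 targets as well. In plain C terms the generated value is an offsetof(), as in this standalone sketch (the struct is a stand-in, not the real _thread_arch_t layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in layout for illustration only. */
struct fake_thread_arch {
	uint32_t priv_stack_start;
	uint32_t priv_stack_end;
};

int main(void)
{
	/* This is the kind of constant the generated offset symbol carries. */
	printf("priv_stack_end offset: %zu bytes\n",
	       offsetof(struct fake_thread_arch, priv_stack_end));
	return 0;
}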
