
Commit 0438b9f

wearyzen authored and jhedberg committed
arch: arm: start threads on privileged stack
Use the privileged stack when starting K_USER threads in arch_new_thread(). Threads entering user mode with k_thread_user_mode_enter() keep their existing flow. To support both cases, z_arm_userspace_enter() now takes an internal ABI flag (sp_is_priv) indicating whether PSP already points to the privileged stack.

Also fix calculation of the privileged stack top: use priv_stack_end directly instead of priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE, which failed to account for guard/FPU offsets applied to priv_stack_start.

Signed-off-by: Sudan Landge <[email protected]>
1 parent b827253 commit 0438b9f
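As context for the calculation fix described above, here is a minimal standalone sketch of the ordering issue. The constants and addresses below are placeholders, not Zephyr's real CONFIG_PRIVILEGED_STACK_SIZE, MPU_GUARD_ALIGN_AND_SIZE, or thread structure: the point is only that the stack top must be captured before the guard offset is added to priv_stack_start, otherwise recomputing it later as priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE lands past the real end of the privileged stack.

/* Illustrative sketch only: placeholder values stand in for Zephyr's
 * CONFIG_PRIVILEGED_STACK_SIZE and MPU_GUARD_ALIGN_AND_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

#define PRIV_STACK_SIZE 1024u /* placeholder for CONFIG_PRIVILEGED_STACK_SIZE */
#define GUARD_SIZE      64u   /* placeholder for MPU_GUARD_ALIGN_AND_SIZE */

int main(void)
{
        uint32_t priv_stack_start = 0x20001000u; /* hypothetical base of the priv stack object */

        /* New scheme: capture the top before the guard offset is applied. */
        uint32_t priv_stack_end = priv_stack_start + PRIV_STACK_SIZE;
        priv_stack_start += GUARD_SIZE; /* writable area starts above the guard */

        /* Old scheme: recompute the top from the already-adjusted start. */
        uint32_t old_top = priv_stack_start + PRIV_STACK_SIZE;

        printf("correct top 0x%08x, old top 0x%08x (off by %u bytes)\n",
               (unsigned)priv_stack_end, (unsigned)old_top,
               (unsigned)(old_top - priv_stack_end));
        return 0;
}

With a 64-byte guard the old computation ends up 64 bytes past the true top, which is exactly the kind of guard/FPU offset the commit message refers to.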

8 files changed: +149 -118 lines changed

arch/arm/core/cortex_a_r/thread.c

Lines changed: 50 additions & 23 deletions
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
  * Copyright (c) 2021 Lexmark International, Inc.
+ * Copyright 2025 Arm Limited and/or its affiliates <[email protected]>
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -37,6 +38,34 @@
  */
 #define DEFAULT_EXC_RETURN 0xFD;
 
+#ifdef CONFIG_USERSPACE
+static void setup_priv_stack(struct k_thread *thread)
+{
+        /* Set up privileged stack before entering user mode */
+        thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);
+
+        /* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
+         * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
+         * adjusting priv_stack_start for the mpu guard alignment
+         */
+        thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
+
+#if defined(CONFIG_MPU_STACK_GUARD)
+        /* Stack guard area reserved at the bottom of the thread's
+         * privileged stack. Adjust the available (writable) stack
+         * buffer area accordingly.
+         */
+#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
+        thread->arch.priv_stack_start +=
+                ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+                MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
+#else
+        thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
+#endif /* CONFIG_MPU_STACK_GUARD */
+}
+#endif
+
 /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
  * end of the stack, and thus reusable by the stack when not needed anymore.
  *
@@ -80,7 +109,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 
         iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
 #if defined(CONFIG_USERSPACE)
+        thread->arch.priv_stack_start = 0;
         if ((thread->base.user_options & K_USER) != 0) {
+                setup_priv_stack(thread);
+                iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, thread->arch.priv_stack_end);
                 iframe->pc = (uint32_t)arch_user_mode_enter;
         } else {
                 iframe->pc = (uint32_t)z_thread_entry;
@@ -122,9 +154,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                 thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
         }
 #endif
-#if defined(CONFIG_USERSPACE)
-        thread->arch.priv_stack_start = 0;
-#endif
 #endif
         /*
          * initial values in all other registers/thread entries are
@@ -196,10 +225,8 @@ static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
                                         void *p1, void *p2, void *p3)
 {
+        uint32_t sp_is_priv = 1;
 
-        /* Set up privileged stack before entering user mode */
-        _current->arch.priv_stack_start =
-                (uint32_t)z_priv_stack_find(_current->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_THREAD_STACK_INFO)
         /* We're dropping to user mode which means the guard area is no
@@ -216,29 +243,29 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
         _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
         _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_THREAD_STACK_INFO */
-
-        /* Stack guard area reserved at the bottom of the thread's
-         * privileged stack. Adjust the available (writable) stack
-         * buffer area accordingly.
-         */
-#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-        _current->arch.priv_stack_start +=
-                ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
-                MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
-#else
-        _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
-#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
 #endif /* CONFIG_MPU_STACK_GUARD */
 
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
-        _current->arch.priv_stack_end =
-                _current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
-#endif
+        /* 2 ways how arch_user_mode_enter is called:
+         * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
+         *   already setup and stack pointer points to privileged stack.
+         * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
+         *   setup and stack pointer points to user stack.
+         *
+         * When called from k_thread_user_mode_enter, we need to check and setup the privileged
+         * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
+         * Note that we do not change the PSP in this function to avoid any conflict with compiler's
+         * sequence which has already pushed stuff on the user stack.
+         */
+        if (0 == _current->arch.priv_stack_start) {
+                setup_priv_stack(_current);
+                sp_is_priv = 0;
+        }
 
         z_arm_userspace_enter(user_entry, p1, p2, p3,
                               (uint32_t)_current->stack_info.start,
                               _current->stack_info.size -
-                              _current->stack_info.delta);
+                              _current->stack_info.delta,
+                              sp_is_priv);
         CODE_UNREACHABLE;
 }
 
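The sp_is_priv argument added to z_arm_userspace_enter() is easiest to read as a caller-side contract. Below is a minimal C model of that contract, not Zephyr code: the real z_arm_userspace_enter() is written in assembly, and model_userspace_enter(), set_psp() and drop_to_user_mode() are hypothetical stand-ins used only to show what the flag signals.

#include <stdint.h>

struct model_thread {
        uint32_t priv_stack_start;
        uint32_t priv_stack_end;
};

/* Placeholder for writing the PSP register. */
static void set_psp(uint32_t sp) { (void)sp; }

/* Placeholder for the drop to unprivileged execution at the user entry point. */
static void drop_to_user_mode(void) { }

static void model_userspace_enter(struct model_thread *t, uint32_t sp_is_priv)
{
        if (sp_is_priv == 0u) {
                /* Came via k_thread_user_mode_enter(): PSP still points at the
                 * user stack, so move it to the privileged stack top first.
                 */
                set_psp(t->priv_stack_end);
        }
        /* Otherwise the thread was started on its privileged stack by
         * arch_new_thread(), and PSP is already where it needs to be.
         */
        drop_to_user_mode();
}

int main(void)
{
        struct model_thread t = { .priv_stack_start = 0x20002000u, .priv_stack_end = 0x20002400u };

        model_userspace_enter(&t, 0u); /* k_thread_user_mode_enter() path */
        model_userspace_enter(&t, 1u); /* thread started via arch_new_thread() path */
        return 0;
}

Keeping the PSP switch inside z_arm_userspace_enter(), rather than in arch_user_mode_enter(), matches the comment in the diff above: the C function must not move PSP while the compiler may still have live data pushed on the user stack.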
arch/arm/core/cortex_m/thread.c

Lines changed: 49 additions & 18 deletions
@@ -46,6 +46,35 @@
 K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
 #endif
 
+#ifdef CONFIG_USERSPACE
+static void setup_priv_stack(struct k_thread *thread)
+{
+        /* Set up privileged stack before entering user mode */
+        thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);
+
+        /* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
+         * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
+         * adjusting priv_stack_start for the mpu guard alignment
+         */
+        thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
+
+#if defined(CONFIG_MPU_STACK_GUARD)
+        /* Stack guard area reserved at the bottom of the thread's
+         * privileged stack. Adjust the available (writable) stack
+         * buffer area accordingly.
+         */
+#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
+        thread->arch.priv_stack_start +=
+                ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
+                        ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
+                        : MPU_GUARD_ALIGN_AND_SIZE;
+#else
+        thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
+#endif /* CONFIG_MPU_STACK_GUARD */
+}
+#endif
+
 /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
  * end of the stack, and thus reusable by the stack when not needed anymore.
  *
@@ -88,7 +117,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta
 
         iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
 #if defined(CONFIG_USERSPACE)
+        thread->arch.priv_stack_start = 0;
         if ((thread->base.user_options & K_USER) != 0) {
+                setup_priv_stack(thread);
+                iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, thread->arch.priv_stack_end);
                 iframe->pc = (uint32_t)arch_user_mode_enter;
         } else {
                 iframe->pc = (uint32_t)z_thread_entry;
@@ -119,9 +151,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta
                 thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
         }
 #endif
-#if defined(CONFIG_USERSPACE)
-        thread->arch.priv_stack_start = 0;
-#endif
 #endif
 #ifdef CONFIG_ARM_PAC_PER_THREAD
         /* Generate PAC key and save it in thread context to be set later
@@ -222,9 +251,8 @@ uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
 #ifdef CONFIG_USERSPACE
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3)
 {
+        uint32_t sp_is_priv = 1;
 
-        /* Set up privileged stack before entering user mode */
-        _current->arch.priv_stack_start = (uint32_t)z_priv_stack_find(_current->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_THREAD_STACK_INFO)
         /* We're dropping to user mode which means the guard area is no
@@ -241,23 +269,26 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, v
         _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
         _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_THREAD_STACK_INFO */
+#endif /* CONFIG_MPU_STACK_GUARD */
 
-        /* Stack guard area reserved at the bottom of the thread's
-         * privileged stack. Adjust the available (writable) stack
-         * buffer area accordingly.
+        /* 2 ways how arch_user_mode_enter is called:
+         * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
+         *   already setup and stack pointer points to privileged stack.
+         * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
+         *   setup and stack pointer points to user stack.
+         *
+         * When called from k_thread_user_mode_enter, we need to check and setup the privileged
+         * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
+         * Note that we do not change the PSP in this function to avoid any conflict with compiler's
+         * sequence which has already pushed stuff on the user stack.
          */
-#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-        _current->arch.priv_stack_start +=
-                ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
-                        ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
-                        : MPU_GUARD_ALIGN_AND_SIZE;
-#else
-        _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
-#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
-#endif /* CONFIG_MPU_STACK_GUARD */
+        if (0 == _current->arch.priv_stack_start) {
+                setup_priv_stack(_current);
+                sp_is_priv = 0;
+        }
 
         z_arm_userspace_enter(user_entry, p1, p2, p3, (uint32_t)_current->stack_info.start,
-                              _current->stack_info.size - _current->stack_info.delta);
+                              _current->stack_info.size - _current->stack_info.delta, sp_is_priv);
         CODE_UNREACHABLE;
 }
 
arch/arm/core/offsets/offsets_aarch32.c

Lines changed: 2 additions & 1 deletion
@@ -50,8 +50,9 @@ GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return);
 #endif
 #if defined(CONFIG_USERSPACE)
 GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
 GEN_OFFSET_SYM(_thread_arch_t, priv_stack_end);
+
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
 GEN_OFFSET_SYM(_thread_arch_t, sp_usr);
 #endif
 #endif
