/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Lexmark International, Inc.
 * Copyright 2025 Arm Limited and/or its affiliates <[email protected]>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
 */
/* Default EXC_RETURN value used when initializing a thread's exception-return
 * state (0xFD — presumably return-to-thread-mode without FP context on this
 * architecture; confirm against the ARM ARM for the target profile).
 *
 * Note: no trailing semicolon — a semicolon inside a #define is expanded into
 * every use site (CERT PRE11-C); the terminator belongs at the point of use.
 */
#define DEFAULT_EXC_RETURN 0xFD
3940
#ifdef CONFIG_USERSPACE
/* Record the boundaries of a user thread's privileged stack.
 *
 * Fills in thread->arch.priv_stack_start and thread->arch.priv_stack_end from
 * the privileged stack object located by z_priv_stack_find(). The end is
 * computed before the MPU-guard adjustment (see comment below); the start is
 * then advanced past the guard region so it points at the first writable byte.
 */
static void setup_priv_stack(struct k_thread *thread)
{
	/* Set up privileged stack before entering user mode */
	thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);

	/* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
	 * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
	 * adjusting priv_stack_start for the mpu guard alignment
	 */
	thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;

#if defined(CONFIG_MPU_STACK_GUARD)
	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Reserve the larger FP-sized guard when the thread's mode flags
	 * indicate a float-capable MPU guard (Z_ARM_MODE_MPU_GUARD_FLOAT_Msk).
	 */
	thread->arch.priv_stack_start +=
		((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
}
#endif /* CONFIG_USERSPACE */
/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, and thus reusable by the stack when not needed anymore.
 *
@@ -80,7 +109,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
80109
81110 iframe = Z_STACK_PTR_TO_FRAME (struct __basic_sf , stack_ptr );
82111#if defined(CONFIG_USERSPACE )
112+ thread -> arch .priv_stack_start = 0 ;
83113 if ((thread -> base .user_options & K_USER ) != 0 ) {
114+ setup_priv_stack (thread );
115+ iframe = Z_STACK_PTR_TO_FRAME (struct __basic_sf , thread -> arch .priv_stack_end );
84116 iframe -> pc = (uint32_t )arch_user_mode_enter ;
85117 } else {
86118 iframe -> pc = (uint32_t )z_thread_entry ;
@@ -122,9 +154,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
122154 thread -> arch .mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk ;
123155 }
124156#endif
125- #if defined(CONFIG_USERSPACE )
126- thread -> arch .priv_stack_start = 0 ;
127- #endif
128157#endif
129158 /*
130159 * initial values in all other registers/thread entries are
@@ -196,10 +225,8 @@ static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
196225FUNC_NORETURN void arch_user_mode_enter (k_thread_entry_t user_entry ,
197226 void * p1 , void * p2 , void * p3 )
198227{
228+ uint32_t sp_is_priv = 1 ;
199229
200- /* Set up privileged stack before entering user mode */
201- _current -> arch .priv_stack_start =
202- (uint32_t )z_priv_stack_find (_current -> stack_obj );
203230#if defined(CONFIG_MPU_STACK_GUARD )
204231#if defined(CONFIG_THREAD_STACK_INFO )
205232 /* We're dropping to user mode which means the guard area is no
@@ -216,29 +243,29 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
216243 _current -> stack_info .start -= MPU_GUARD_ALIGN_AND_SIZE ;
217244 _current -> stack_info .size += MPU_GUARD_ALIGN_AND_SIZE ;
218245#endif /* CONFIG_THREAD_STACK_INFO */
219-
220- /* Stack guard area reserved at the bottom of the thread's
221- * privileged stack. Adjust the available (writable) stack
222- * buffer area accordingly.
223- */
224- #if defined(CONFIG_FPU ) && defined(CONFIG_FPU_SHARING )
225- _current -> arch .priv_stack_start +=
226- ((_current -> arch .mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk ) != 0 ) ?
227- MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE ;
228- #else
229- _current -> arch .priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE ;
230- #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
231246#endif /* CONFIG_MPU_STACK_GUARD */
232247
233- #if defined(CONFIG_CPU_AARCH32_CORTEX_R )
234- _current -> arch .priv_stack_end =
235- _current -> arch .priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE ;
236- #endif
248+ /* 2 ways how arch_user_mode_enter is called:
249+ * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
250+ * already setup and stack pointer points to privileged stack.
251+ * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
252+ * setup and stack pointer points to user stack.
253+ *
254+ * When called from k_thread_user_mode_enter, we need to check and setup the privileged
255+ * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
256+ * Note that we do not change the PSP in this function to avoid any conflict with compiler's
257+ * sequence which has already pushed stuff on the user stack.
258+ */
259+ if (0 == _current -> arch .priv_stack_start ) {
260+ setup_priv_stack (_current );
261+ sp_is_priv = 0 ;
262+ }
237263
238264 z_arm_userspace_enter (user_entry , p1 , p2 , p3 ,
239265 (uint32_t )_current -> stack_info .start ,
240266 _current -> stack_info .size -
241- _current -> stack_info .delta );
267+ _current -> stack_info .delta ,
268+ sp_is_priv );
242269 CODE_UNREACHABLE ;
243270}
244271
0 commit comments