Skip to content

Commit 319c697

Browse files
wearyzennashif
authored and committed
arch: arm: switch to privilege stack in SVC handler
Initialize the privilege stack and switch PSP to it early in the SVC handler to ensure `z_arm_do_syscall` does not start on a user-accessible stack frame. Signed-off-by: Sudan Landge <[email protected]>
1 parent 52151d0 commit 319c697

File tree

3 files changed

+251
-66
lines changed

3 files changed

+251
-66
lines changed

arch/arm/core/cortex_m/swap_helper.S

Lines changed: 212 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -508,20 +508,219 @@ SECTION_FUNC(TEXT, z_arm_svc)
508508
* r8 - saved link register
509509
*/
510510
.L_do_syscall:
511+
/*
512+
* Build a privilege stack frame from the user stack frame, then switch PSP
513+
* to it. This ensures return from SVC does not rely on the user stack.
514+
*
515+
* Layout of privilege stack created from user stack:
516+
*
517+
* +------+-------------------------+------+-------------------------+--------------------------+
518+
* | User stack | Privilege stack | Notes |
519+
* +------+-------------------------+------+-------------------------+--------------------------+
520+
* |Offset| contents |Offset| contents | |
521+
* +------+-------------------------+------+-------------------------+--------------------------+
522+
* | 0 | R0 -> | 0 | R0 | PSP switches from 0th |
523+
* | | | | | offset of user stack to |
524+
* | | | | | 0th offset of priv stack |
525+
* | 4 | R1 -> | 4 | R1 | |
526+
* | 8 | R2 -> | 8 | R2 | |
527+
* | 12 | R3 -> |12 | R3 | |
528+
* | 16 | R12 -> |16 | R12 | |
529+
* | 20 | LR -> |20 | LR | |
530+
* | 24 | Return Address -x> |24 | z_arm_do_syscall |return address from user |
531+
* | | | | |sf is not copied. Instead,|
532+
* | | | | |it is replaced so that |
533+
* | | | | |z_arm_svc returns to |
534+
* | | | | |z_arm_do_syscall. |
535+
* | | | | | |
536+
* | 28 | xPSR (w/ or w/o pad) -> |28 | xPSR (pad bit cleared) |This completes the basic |
537+
* | | | | |exception sf w/ or w/o pad|
538+
* | | | | | |
539+
* | -- | FP regs + FPSCR -> |-- | FP regs + FPSCR |For arch supporting fp |
540+
* | | (w/ or w/o pad) | | |context an additional |
541+
* | | | | |extended sf is copied. |
542+
* |________________________________|______|_________________________|__________________________|
543+
* | | | | |On returning to |
544+
* | | | | |z_arm_do_syscall, the |
545+
* | | | | |above sf has already been |
546+
* | | | | |unstacked and 8B from the |
547+
* | | | | |then sf are used to pass |
548+
* | | | | |original pre-svc sp & the |
549+
* | | | | |return address. |
550+
* | | | | |Note: at the moment |
551+
* | | | | |z_arm_do_syscall also |
552+
* | | | | |expects the return address|
553+
* | | | | |to be set in r8. |
554+
* | | | | | |
555+
* | | | 0 | address that |z_arm_do_syscall expects |
556+
* | | | | z_arm_do_syscall should |the original pre-svc sp at|
557+
* | | | | set as PSP before |0th offset i.e. new sp[0] |
558+
* | | | | returning from svc. |and, |
559+
* | | | | | |
560+
* | | | 4 | Address that |the return address at |
561+
* | | | | z_arm_do_syscall should |sp[4]. Note that this is |
562+
* | | | | return to after handling|the return address copied |
563+
* | | | | svc |from user exception sf[24]|
564+
* | | | | |which was not copied in |
565+
* | | | | |the previous sf. |
566+
* +------+-------------------------+------+-------------------------+--------------------------+
567+
* "sf" in this function is used as abbreviation for "stack frame".
568+
* Note that the "FP regs + FPSCR" are only present if CONFIG_FPU_SHARING=y, and the optional pad
569+
* is only present if PSP was not 8-byte aligned when SVC was executed.
570+
* Also note that FPU cannot be present in ARMv6-M or ARMv8-M Baseline implementations
571+
* (i.e., it may only be present when CONFIG_ARMV7_M_ARMV8_M_MAINLINE is enabled).
572+
*/
573+
/* Start by fetching the top of privileged stack */
511574
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
512-
movs r3, #24
513-
ldr r1, [r0, r3] /* grab address of PC from stack frame */
514-
mov r8, r1
575+
ldr r1, =_kernel
576+
ldr r1, [r1, #_kernel_offset_to_current]
577+
adds r1, r1, #_thread_offset_to_priv_stack_start
578+
ldr r1, [r1] /* bottom of priv stack */
579+
ldr r3, =CONFIG_PRIVILEGED_STACK_SIZE
580+
subs r3, #(_EXC_HW_SAVED_BASIC_SF_SIZE+8) /* 8 for original sp and pc */
581+
add r1, r3
582+
mov ip, r1
515583
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
516-
ldr r8, [r0, #24] /* grab address of PC from stack frame */
584+
ldr ip, =_kernel
585+
ldr ip, [ip, #_kernel_offset_to_current]
586+
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* bottom of priv stack */
587+
add ip, #CONFIG_PRIVILEGED_STACK_SIZE
588+
#ifdef CONFIG_FPU_SHARING
589+
/* Assess whether svc calling thread had been using the FP registers. */
590+
tst lr, #_EXC_RETURN_FTYPE_Msk
591+
ite eq
592+
moveq r8, #_EXC_HW_SAVED_EXTENDED_SF_SIZE
593+
movne r8, #_EXC_HW_SAVED_BASIC_SF_SIZE
594+
#else
595+
mov r8, #_EXC_HW_SAVED_BASIC_SF_SIZE
517596
#endif
518-
ldr r1, =z_arm_do_syscall
597+
sub ip, #8 /* z_arm_do_syscall will use this to get original sp and pc */
598+
sub ip, r8 /* 32 for basic sf + 72 for the optional esf */
599+
#endif
600+
601+
/*
602+
* At this point:
603+
* r0 has PSP i.e. top of user stack
604+
* ip has top of privilege stack
605+
* r8 has hardware-saved stack frame size (only in case of mainline)
606+
*/
607+
push {r4-r7}
608+
push {r2}
519609
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
520-
str r1, [r0, r3] /* overwrite the PC to point to z_arm_do_syscall */
610+
mov r2, r0 /* safe to use r2 since it is saved on MSP */
611+
612+
/* Check for padding in the sf */
613+
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* grab xPSR from sf which has the pad bit */
614+
movs r3, #1
615+
/* Check if pad bit 9 is set */
616+
lsls r3, r3, #9
617+
tst r1, r3
618+
beq .L_no_padding
619+
/* special handling for padded sf */
620+
bics r1, r3 /* clear the pad bit (priv stack is aligned and doesn't need it) */
621+
adds r2, #4
622+
.L_no_padding:
623+
/* Calculate original pre-svc user sp which is psp + sf size (+4B if pad bit was set) */
624+
adds r2, #_EXC_HW_SAVED_BASIC_SF_SIZE
625+
mov r3, ip
626+
str r2,[r3, #0]
627+
628+
/* Store the pre-SVC user SP at the offset expected by z_arm_do_syscall,
629+
* as detailed in the table above.
630+
*/
631+
str r2,[r3, #_EXC_HW_SAVED_BASIC_SF_SIZE]
632+
/* sf of priv stack has the same xPSR as user stack but with 9th bit reset */
633+
str r1,[r3, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET]
634+
635+
/* r0-r3, r12, LR from user stack sf are copied to sf of priv stack */
636+
mov r1, r0
637+
mov r2, r3
638+
ldmia r1!, {r4-r7}
639+
stmia r2!, {r4-r7}
640+
ldmia r1!, {r4-r5}
641+
stmia r2!, {r4-r5}
642+
643+
/* Store the svc return address at the offset expected by z_arm_do_syscall,
644+
* as detailed in the table above.
645+
*/
646+
str r5, [r3, #(_EXC_HW_SAVED_BASIC_SF_SIZE+4)]
647+
648+
ldr r1, =z_arm_do_syscall
649+
str r1, [r3, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* Execution return to z_arm_do_syscall */
650+
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* grab address of PC from stack frame */
651+
/* Store the svc return address (i.e. next instr to svc) in r8 as expected by z_arm_do_syscall.
652+
*/
653+
mov r8, r1
654+
521655
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
522-
str r1, [r0, #24] /* overwrite the PC to point to z_arm_do_syscall */
656+
mov r2, r0 /* safe to use r2 since it is saved on MSP */
657+
658+
/* Calculate original pre-svc user sp without pad which is psp + sf size */
659+
add r2, r8
660+
661+
/* Also, check for padding in the sf */
662+
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* grab xPSR from sf which has the pad bit */
663+
tst r1, #(1<<9) /* Check if pad bit 9 is set */
664+
beq .L_no_padding
665+
bics r1, #(1<<9) /* clear the pad bit (priv stack is aligned and doesn't need it) */
666+
/* Calculate original pre-svc user sp with pad */
667+
add r2, #4
668+
.L_no_padding:
669+
str r2,[ip, #0]
670+
/* Store the pre-SVC user SP at the offset expected by z_arm_do_syscall,
671+
* as detailed in the table above.
672+
*/
673+
str r2,[ip, r8]
674+
str r1,[ip, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* priv sf get user sf xPSR with bit9 reset */
675+
676+
/* r0-r3, r12, LR from user stack sf are copied to sf of priv stack */
677+
mov r1, r0
678+
mov r2, ip
679+
ldmia r1!, {r4-r7}
680+
stmia r2!, {r4-r7}
681+
ldmia r1!, {r4-r5}
682+
stmia r2!, {r4-r5}
683+
684+
/* Store the svc return address at the offset expected by z_arm_do_syscall,
685+
* as detailed in the table above.
686+
*/
687+
add r8, #4
688+
str r5, [ip, r8]
689+
690+
ldr r1, =z_arm_do_syscall
691+
str r1, [ip, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* Execution return to z_arm_do_syscall */
692+
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* grab address of PC from stack frame */
693+
/* Store the svc return address (i.e. next instr to svc) in r8 as expected by z_arm_do_syscall.
694+
*/
695+
mov r8, r1
696+
697+
/* basic stack frame is copied at this point to privilege stack,
698+
* now time to copy the fp context
699+
*/
700+
#ifdef CONFIG_FPU_SHARING
701+
tst lr, #_EXC_RETURN_FTYPE_Msk
702+
bne .L_skip_fp_copy
703+
add r1, r0, #32
704+
add r2, ip, #32
705+
706+
vldmia r1!, {s0-s15}
707+
vstmia r2!, {s0-s15}
708+
709+
/* copy FPSCR + reserved (8 bytes) */
710+
ldmia r1!, {r4, r5}
711+
stmia r2!, {r4, r5}
712+
.L_skip_fp_copy:
523713
#endif
524714

715+
#endif
716+
pop {r2} /* restore CONTROL value */
717+
pop {r4-r7}
718+
719+
/* Point PSP to privilege stack,
720+
* note that r0 still has the old PSP
721+
*/
722+
msr PSP, ip
723+
525724
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
526725
ldr r3, =K_SYSCALL_LIMIT
527726
cmp r6, r3
@@ -575,14 +774,12 @@ SECTION_FUNC(TEXT, z_arm_svc)
575774
isb
576775

577776
#if defined(CONFIG_BUILTIN_STACK_GUARD)
578-
/* Thread is now in privileged mode; after returning from SCVall it
579-
* will use the default (user) stack before switching to the privileged
580-
* stack to execute the system call. We need to protect the user stack
581-
* against stack overflows until this stack transition.
582-
*/
583-
ldr r1, [r0, #_thread_offset_to_stack_info_start] /* stack_info.start */
584-
msr PSPLIM, r1
585-
#endif /* CONFIG_BUILTIN_STACK_GUARD */
777+
/* Set stack pointer limit (needed in privileged mode) */
778+
ldr ip, =_kernel
779+
ldr ip, [ip, #_kernel_offset_to_current]
780+
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
781+
msr PSPLIM, ip
782+
#endif
586783

587784
/* return from SVC to the modified LR - z_arm_do_syscall */
588785
bx lr

arch/arm/core/userspace.S

Lines changed: 4 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
* Userspace and service handler hooks
33
*
44
* Copyright (c) 2017 Linaro Limited
5+
* Copyright 2025 Arm Limited and/or its affiliates <open-source-[email protected]>
56
*
67
* SPDX-License-Identifier: Apache-2.0
78
*
@@ -322,9 +323,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
322323
* This function is used to do system calls from unprivileged code. This
323324
* function is responsible for the following:
324325
* 1) Fixing up bad syscalls
325-
* 2) Configuring privileged stack and loading up stack arguments
326-
* 3) Dispatching the system call
327-
* 4) Restoring stack and calling back to the caller of the SVC
326+
* 2) Dispatching the system call
327+
* 3) Restoring stack and calling back to the caller of the SVC
328328
*
329329
*/
330330
SECTION_FUNC(TEXT, z_arm_do_syscall)
@@ -342,41 +342,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
342342
* At this point PSPLIM is already configured to guard the default (user)
343343
* stack, so pushing to the default thread's stack is safe.
344344
*/
345-
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
346-
/* save current stack pointer (user stack) */
347-
mov ip, sp
348-
/* temporarily push to user stack */
349-
push {r0,r1}
350-
/* setup privileged stack */
351-
ldr r0, =_kernel
352-
ldr r0, [r0, #_kernel_offset_to_current]
353-
adds r0, r0, #_thread_offset_to_priv_stack_start
354-
ldr r0, [r0] /* priv stack ptr */
355-
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
356-
add r0, r1
357-
358-
/* Store current SP and LR at the beginning of the priv stack */
359-
subs r0, #8
360-
mov r1, ip
361-
str r1, [r0, #0]
362-
mov r1, lr
363-
str r1, [r0, #4]
364-
mov ip, r0
365-
/* Restore user stack and original r0, r1 */
366-
pop {r0, r1}
367-
368-
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
369-
/* setup privileged stack */
370-
ldr ip, =_kernel
371-
ldr ip, [ip, #_kernel_offset_to_current]
372-
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
373-
add ip, #CONFIG_PRIVILEGED_STACK_SIZE
374-
375-
/* Store current SP and LR at the beginning of the priv stack */
376-
subs ip, #8
377-
str sp, [ip, #0]
378-
str lr, [ip, #4]
379-
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
345+
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
380346
/*
381347
* The SVC handler has already switched to the privileged stack.
382348
* Store the user SP and LR at the beginning of the priv stack.
@@ -387,11 +353,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
387353
push {ip, lr}
388354
#endif
389355

390-
#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
391-
/* switch to privileged stack */
392-
msr PSP, ip
393-
#endif
394-
395356
/* Note (applies when using stack limit checking):
396357
* We do not need to lock IRQs after switching PSP to the privileged stack;
397358
* PSPLIM is guarding the default (user) stack, which, by design, is
@@ -400,14 +361,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
400361
* the maximum exception stack frame.
401362
*/
402363

403-
#if defined(CONFIG_BUILTIN_STACK_GUARD)
404-
/* Set stack pointer limit (needed in privileged mode) */
405-
ldr ip, =_kernel
406-
ldr ip, [ip, #_kernel_offset_to_current]
407-
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
408-
msr PSPLIM, ip
409-
#endif
410-
411364
/*
412365
* r0-r5 contain arguments
413366
* r6 contains call_id

include/zephyr/arch/arm/cortex_m/cpu.h

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,41 @@
2626
#define _EXC_RETURN_SPSEL_Msk (1 << 2)
2727
#define _EXC_RETURN_FTYPE_Msk (1 << 4)
2828

29+
/*
30+
* Cortex-M Exception Stack Frame Layouts
31+
*
32+
* When an exception is taken, the processor automatically pushes
33+
* registers to the current stack. The layout depends on whether
34+
* the FPU is active.
35+
*/
36+
37+
/* Basic hardware-saved exception stack frame (no FPU context):
38+
* R0-R3 (4 x 4B = 16B)
39+
* R12 (4B)
40+
* LR (4B)
41+
* Return address (4B)
42+
* RETPSR (4B)
43+
*--------------------------
44+
* Total: 32 bytes
45+
*/
46+
#define _EXC_HW_SAVED_BASIC_SF_SIZE (32)
47+
#define _EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET (24)
48+
#define _EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET (28)
49+
50+
/* Extended hardware saved stack frame consists of:
51+
* R0-R3 (16B)
52+
* R12 (4B)
53+
* LR (R14) (4B)
54+
* Return address (4B)
55+
* RETPSR (4B)
56+
* S0-S15 (16 x 4B = 64B)
57+
* FPSCR (4B)
58+
* Reserved (4B)
59+
*--------------------------
60+
* Total: 104 bytes
61+
*/
62+
#define _EXC_HW_SAVED_EXTENDED_SF_SIZE (104)
63+
2964
#else
3065
#include <stdint.h>
3166

0 commit comments

Comments
 (0)