Skip to content

Commit 2a357e5

Browse files
Bradley Bolen authored and nashif committed
arch: arm: core: aarch32: Fix the syscall design for Cortex-R
When calling a syscall, the SVC routine will now elevate the thread to privileged mode and exit the SVC setting the return address to the syscall handler. When the thread is swapped back in, it will be running z_do_arm_syscall in system mode. That function will run the syscall then automatically return the thread to usr mode. This allows running the syscall in sys mode on a thread so that we can use syscalls that sleep without doing unnatural things. The previous implementation would enable interrupts while still in the SVC call and do weird things with the nesting count. An interrupt could happen during this time when the syscall was still in the exception state, but the nested count had been decremented too soon. Correctness of the nested count is important for future floating point unit work. The Cortex-R behavior now matches that of Cortex-M. Signed-off-by: Bradley Bolen <[email protected]>
1 parent 771c177 commit 2a357e5

File tree

2 files changed

+56
-137
lines changed

2 files changed

+56
-137
lines changed

arch/arm/core/aarch32/swap_helper.S

Lines changed: 25 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -646,6 +646,7 @@ svc_system_thread:
646646
srsdb #MODE_SYS!
647647
cps #MODE_SYS
648648
push {r0-r3, r12, lr}
649+
mov ip, sp
649650
cps #MODE_SVC
650651

651652
/*
@@ -723,20 +724,14 @@ _oops:
723724
#if defined(CONFIG_USERSPACE)
724725
/*
725726
* System call will setup a jump to the _do_arm_syscall function
726-
* when the SVC returns via the bx lr.
727+
* running in system mode when returning from the exception.
727728
*
728729
* There is some trickery involved here because we have to preserve
729730
* the original PC value so that we can return back to the caller of
730731
* the SVC.
731732
*
732733
* On SVC exception, the USER/SYSTEM stack looks like the following:
733-
*
734-
* sp+0: r0
735-
* sp+4: r1
736-
* sp+8: r2
737-
* sp+12: r3
738-
* sp+16: r12
739-
* sp+20: LR_svc (address of opcode just following SVC opcode )
734+
* r0 - r1 - r2 - r3 - r12 - LR - { possible FPU space } - PC - SPSR
740735
*
741736
* Registers look like:
742737
* r0 - arg1
@@ -749,6 +744,26 @@ _oops:
749744
* r8 - saved link register
750745
*/
751746
_do_syscall:
747+
ldr r8, [ip, #24] /* grab address of LR from stack frame */
748+
749+
/* Make the exception return to system state */
750+
ldr r1, [ip, #28]
751+
752+
/* If leaving thumb mode, set the return address to thumb mode */
753+
tst r1, #T_BIT
754+
orrne r8, #1
755+
756+
bic r1, #(MODE_MASK | T_BIT)
757+
orr r1, r1, #MODE_SYS
758+
str r1, [ip, #28]
759+
760+
/*
761+
* Store the address of z_arm_do_syscall for the exit so the exception
762+
* return goes there in system state.
763+
*/
764+
ldr r1, =z_arm_do_syscall
765+
str r1, [ip, #24] /* overwrite the LR to point to z_arm_do_syscall */
766+
752767
/* validate syscall limit, only set priv mode if valid */
753768
ldr ip, =K_SYSCALL_LIMIT
754769
cmp r6, ip
@@ -761,7 +776,6 @@ _do_syscall:
761776
ldr r6, =K_SYSCALL_BAD
762777

763778
valid_syscall_id:
764-
push {r0, r1}
765779
ldr r0, =_kernel
766780
ldr r0, [r0, #_kernel_offset_to_current]
767781
ldr r1, [r0, #_thread_offset_to_mode]
@@ -776,56 +790,8 @@ valid_syscall_id:
776790
*/
777791
isb
778792

779-
/*
780-
* restore r0-r3 from supervisor stack before changing to system mode.
781-
* r0,r1 saved just after valid_syscall_id
782-
* r2,r3 saved just after z_arm_svc
783-
*/
784-
pop {r0-r3}
785-
786-
add sp,sp,r3 /* un-do stack pointer alignment to double-word boundary */
787-
788-
/* Switch to system mode */
789-
cps #MODE_SYS
790-
791-
/*
792-
* Restore the nested level. The thread that is doing the system call may
793-
* be put to sleep, as in the case of waiting in k_msgq_get() with
794-
* K_FOREVER, so we don't want the nesting level to be elevated during
795-
* that complete time.
796-
*/
797-
ldr r2, =_kernel
798-
ldr r1, [r2, #_kernel_offset_to_nested]
799-
sub r1, r1, #1
800-
str r1, [r2, #_kernel_offset_to_nested]
801-
802-
/*
803-
* restore r0-r3 from stack since we've used them above during demux
804-
*/
805-
ldr r0, [sp, #0]
806-
ldr r1, [sp, #4]
807-
ldr r2, [sp, #8]
808-
ldr r3, [sp, #12]
809-
810-
/*
811-
* grab return address from USER/SYSTEM stack frame
812-
* (just past the SVC opcode)
813-
*/
814-
ldr r8, [sp, #20]
815-
816-
/*
817-
* User stack left with:
818-
*
819-
* sp: r0
820-
* sp+4: r1
821-
* sp+8: r2
822-
* sp+12: r3
823-
* sp+16: r12
824-
* sp+20: LR_svc (address of opcode just following SVC opcode )
825-
*/
826-
827-
/* branch to _arm_do_syscall. We will not return here. */
828-
b z_arm_do_syscall
793+
/* Return to _arm_do_syscall in system state. */
794+
b z_arm_int_exit
829795
#endif
830796

831797
GTEXT(z_arm_cortex_r_svc)

arch/arm/core/aarch32/userspace.S

Lines changed: 31 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -351,7 +351,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
351351
/* Restore user stack and original r0, r1 */
352352
pop {r0, r1}
353353

354-
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
354+
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
355355
/* setup privileged stack */
356356
ldr ip, =_kernel
357357
ldr ip, [ip, #_kernel_offset_to_current]
@@ -362,16 +362,19 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
362362
subs ip, #8
363363
str sp, [ip, #0]
364364
str lr, [ip, #4]
365-
#elif defined(CONFIG_CPU_CORTEX_R)
366-
/* Store current LR at the beginning of the priv stack */
367-
push {lr}
368-
#endif
369-
370-
#if !defined(CONFIG_CPU_CORTEX_R)
365+
#elif defined(CONFIG_ARMV7_R)
371366
/*
372-
* switch to privileged stack
373-
* The stack switch happens on exception entry for Cortex-R
367+
* The SVC handler has already switched to the privileged stack.
368+
* Store the user SP and LR at the beginning of the priv stack.
374369
*/
370+
ldr ip, =_kernel
371+
ldr ip, [ip, #_kernel_offset_to_current]
372+
ldr ip, [ip, #_thread_offset_to_sp_usr]
373+
push {ip, lr}
374+
#endif
375+
376+
#if !defined(CONFIG_ARMV7_R)
377+
/* switch to privileged stack */
375378
msr PSP, ip
376379
#endif
377380

@@ -446,19 +449,15 @@ dispatch_syscall:
446449
mov r0, ip
447450

448451
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
449-
|| defined(CONFIG_CPU_CORTEX_R)
452+
|| defined(CONFIG_ARMV7_R)
450453
ldr ip, =K_SYSCALL_BAD
451454
cmp r6, ip
452455
bne valid_syscall
453456

454457
/* BAD SYSCALL path */
455458
/* fixup stack frame on the privileged stack, adding ssf */
456459
mov ip, sp
457-
#if defined(CONFIG_CPU_CORTEX_R)
458-
push {r4,r5,ip}
459-
#else
460460
push {r4,r5,ip,lr}
461-
#endif
462461
b dispatch_syscall
463462

464463
valid_syscall:
@@ -472,36 +471,13 @@ dispatch_syscall:
472471
add ip, r6
473472
ldr ip, [ip] /* load table address */
474473

475-
#if defined(CONFIG_CPU_CORTEX_R)
476-
/*
477-
* We can only be in this system call handling code if interrupts were
478-
* enabled. This is because we would only come down this path if we were
479-
* actively running in user state, and user state CANNOT disable external
480-
* interrupts via irq_lock(). We want external interrupts enabled while
481-
* running the system call handler, so we can blindly enable them now, and
482-
* disable them afterwards.
483-
*/
484-
cpsie i
485-
#endif
486-
487474
/* execute function from dispatch table */
488475
blx ip
489476

490-
#if defined(CONFIG_CPU_CORTEX_R)
491-
/*
492-
* for same reasoning as above: we now disable external interrupts.
493-
*/
494-
cpsid i
495-
496-
/* restore LR */
497-
ldr lr, [sp,#12]
498-
#else
499477
/* restore LR */
500478
ldr lr, [sp,#16]
501479
#endif
502480

503-
#endif
504-
505481

506482
#if defined(CONFIG_BUILTIN_STACK_GUARD)
507483
/*
@@ -545,9 +521,12 @@ dispatch_syscall:
545521
/* Restore r0 */
546522
mov r0, ip
547523

548-
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
524+
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
549525
/* set stack back to unprivileged stack */
550526
ldr ip, [sp,#12]
527+
#endif
528+
529+
#if !defined(CONFIG_ARMV7_R)
551530
msr PSP, ip
552531
#endif
553532

@@ -574,18 +553,21 @@ dispatch_syscall:
574553
orrs r2, r2, r3
575554
msr CONTROL, r2
576555
pop {r2, r3}
577-
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
556+
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
557+
|| defined(CONFIG_CPU_CORTEX_R)
578558
ldr r0, =_kernel
579559
ldr r0, [r0, #_kernel_offset_to_current]
580560
ldr r1, [r0, #_thread_offset_to_mode]
581561
orrs r1, r1, #1
582562
/* Store (unprivileged) mode in thread's mode state variable */
583563
str r1, [r0, #_thread_offset_to_mode]
584564
dsb
565+
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
585566
/* drop privileges by setting bit 0 in CONTROL */
586567
mrs ip, CONTROL
587568
orrs ip, ip, #1
588569
msr CONTROL, ip
570+
#endif
589571
#endif
590572

591573
/* ISB is not strictly necessary here (stack pointer is not being
@@ -636,42 +618,10 @@ dispatch_syscall:
636618
*/
637619
mov ip, r8
638620
orrs ip, ip, #1
639-
640-
#endif
641-
642-
#if defined(CONFIG_CPU_CORTEX_R)
643-
/*
644-
* The stack contains (from top)
645-
* spsr lr lr_svc r12 r3 r2 r1 r0 lr sp r5 r4
646-
* Unwind everything except the return state that will be used for rfeia.
647-
*/
648-
add sp, sp, #(8*4)
649-
ldmia sp!, {r12,lr}
650-
pop {r2, r3}
651-
652-
cps #MODE_SVC
653-
654-
/*
655-
* Restore lr_svc stored into the SVC mode stack by the mode entry
656-
* function. This ensures that the return address of the interrupted
657-
* context is preserved in case of interrupt nesting.
658-
*/
659-
pop {lr}
660-
661-
/*
662-
* Move the return state from the privileged stack to the service
663-
* stack. We need to put the user stack back in $sp, but we cannot
664-
* trust the user stack. Therefore, put the return state on the svc
665-
* stack and return from there.
666-
*/
667-
push {r2, r3}
668-
621+
#elif defined(CONFIG_ARMV7_R)
669622
/* Restore user stack pointer */
670-
ldr r1, =_kernel
671-
ldr r1, [r1, #_kernel_offset_to_current]
672-
cps #MODE_SYS
673-
ldr sp, [r1, #_thread_offset_to_sp_usr] /* sp_usr */
674-
cps #MODE_SVC
623+
ldr ip, [sp,#12]
624+
mov sp, ip
675625

676626
/* Zero out volatile (caller-saved) registers so as to not leak state from
677627
* kernel mode. The C calling convention for the syscall handler will
@@ -681,12 +631,15 @@ dispatch_syscall:
681631
mov r2, #0
682632
mov r3, #0
683633

684-
/* return from SVC state to user state. */
685-
rfeia sp!
686-
#else
687-
bx ip
634+
/*
635+
* return back to original function that called SVC
636+
*/
637+
mov ip, r8
638+
cps #MODE_USR
688639
#endif
689640

641+
bx ip
642+
690643

691644
/*
692645
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)

0 commit comments

Comments (0)