@@ -351,7 +351,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
     /* Restore user stack and original r0, r1 */
     pop {r0, r1}
 
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     /* setup privileged stack */
     ldr ip, =_kernel
     ldr ip, [ip, #_kernel_offset_to_current]
@@ -362,16 +362,19 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
     subs ip, #8
     str sp, [ip, #0]
     str lr, [ip, #4]
-#elif defined(CONFIG_CPU_CORTEX_R)
-    /* Store current LR at the beginning of the priv stack */
-    push {lr}
-#endif
-
-#if !defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_ARMV7_R)
     /*
-     * switch to privileged stack
-     * The stack switch happens on exception entry for Cortex-R
+     * The SVC handler has already switched to the privileged stack.
+     * Store the user SP and LR at the beginning of the priv stack.
      */
+    ldr ip, =_kernel
+    ldr ip, [ip, #_kernel_offset_to_current]
+    ldr ip, [ip, #_thread_offset_to_sp_usr]
+    push {ip, lr}
+#endif
+
+#if !defined(CONFIG_ARMV7_R)
+    /* switch to privileged stack */
     msr PSP, ip
 #endif
 
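A note for orientation (not part of the commit): on both profiles this prologue records the user stack pointer and the user LR at the base of the privileged stack, in the same order, so the common exit path can reload them later. A minimal C picture of those two saved words, with illustrative names only:

    /*
     * Illustration only, not Zephyr code: the two words that
     * "str sp"/"str lr" (M-profile) and "push {ip, lr}" (ARMv7-R)
     * leave at the base of the privileged stack.
     */
    struct priv_stack_save {
        void *user_sp;      /* saved user stack pointer */
        void *user_lr;      /* saved user return address */
    };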
@@ -446,19 +449,15 @@ dispatch_syscall:
     mov r0, ip
 
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_CPU_CORTEX_R)
+    || defined(CONFIG_ARMV7_R)
     ldr ip, =K_SYSCALL_BAD
     cmp r6, ip
     bne valid_syscall
 
     /* BAD SYSCALL path */
     /* fixup stack frame on the privileged stack, adding ssf */
     mov ip, sp
-#if defined(CONFIG_CPU_CORTEX_R)
-    push {r4, r5, ip}
-#else
     push {r4, r5, ip, lr}
-#endif
     b dispatch_syscall
 
 valid_syscall:
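Context for the K_SYSCALL_BAD check above: the caller-supplied syscall ID in r6 has already been range-checked, with out-of-range IDs rewritten to K_SYSCALL_BAD, whose table entry is a handler that reports the error. A hedged C sketch of what dispatch_syscall then does; the table name mirrors Zephyr's _k_syscall_table, but the signature is simplified for illustration:

    #include <stdint.h>

    /* Sketch only: one handler signature for every entry, with ssf
     * pointing at the fixed-up stack frame pushed above.
     */
    typedef uintptr_t (*syscall_fn_t)(uintptr_t a1, uintptr_t a2,
                                      uintptr_t a3, uintptr_t a4,
                                      uintptr_t a5, uintptr_t a6,
                                      void *ssf);

    extern const syscall_fn_t _k_syscall_table[];

    static uintptr_t dispatch(uintptr_t id, const uintptr_t args[6], void *ssf)
    {
        /* Out-of-range IDs were already mapped to K_SYSCALL_BAD, so the
         * table access is in bounds by construction.
         */
        return _k_syscall_table[id](args[0], args[1], args[2],
                                    args[3], args[4], args[5], ssf);
    }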
@@ -472,36 +471,13 @@ dispatch_syscall:
     add ip, r6
     ldr ip, [ip]    /* load table address */
 
-#if defined(CONFIG_CPU_CORTEX_R)
-    /*
-     * We can only be in this system call handling code if interrupts were
-     * enabled. This is because we would only come down this path if we were
-     * actively running in user state, and user state CANNOT disable external
-     * interrupts via irq_lock(). We want external interrupts enabled while
-     * running the system call handler, so we can blindly enable them now, and
-     * disable them afterwards.
-     */
-    cpsie i
-#endif
-
     /* execute function from dispatch table */
     blx ip
 
-#if defined(CONFIG_CPU_CORTEX_R)
-    /*
-     * for same reasoning as above: we now disable external interrupts.
-     */
-    cpsid i
-
-    /* restore LR */
-    ldr lr, [sp, #12]
-#else
     /* restore LR */
     ldr lr, [sp, #16]
 #endif
 
-#endif
-
 
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
     /*
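The Cortex-R block deleted above enabled interrupts around the handler on the reasoning that user mode cannot hold irq_lock(), so IRQs were necessarily enabled on entry; the reworked flow no longer needs that juggling. For reference, the two instructions it used, wrapped in hedged C helpers (GCC inline asm, A/R profile; not Zephyr API):

    static inline void irqs_unmask(void)
    {
        __asm__ volatile("cpsie i" ::: "memory");   /* enable IRQs */
    }

    static inline void irqs_mask(void)
    {
        __asm__ volatile("cpsid i" ::: "memory");   /* disable IRQs */
    }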
@@ -545,9 +521,12 @@ dispatch_syscall:
     /* Restore r0 */
     mov r0, ip
 
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     /* set stack back to unprivileged stack */
     ldr ip, [sp, #12]
+#endif
+
+#if !defined(CONFIG_ARMV7_R)
     msr PSP, ip
 #endif
 
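The new !defined(CONFIG_ARMV7_R) guard around msr PSP reflects an architectural split: PSP is an M-profile special register, while ARMv7-R banks the user stack pointer as sp_usr, and the Cortex-R path restores it explicitly in the final hunk below. A hedged, M-profile-only sketch of the write (GCC inline asm, illustrative):

    static inline void write_psp(void *stack_top)
    {
        /* MSR to the process stack pointer; only exists on M-profile */
        __asm__ volatile("msr PSP, %0" :: "r"(stack_top) : "memory");
    }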
@@ -574,18 +553,21 @@ dispatch_syscall:
     orrs r2, r2, r3
     msr CONTROL, r2
     pop {r2, r3}
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+    || defined(CONFIG_CPU_CORTEX_R)
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
     ldr r1, [r0, #_thread_offset_to_mode]
     orrs r1, r1, #1
     /* Store (unprivileged) mode in thread's mode state variable */
     str r1, [r0, #_thread_offset_to_mode]
     dsb
+#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     /* drop privileges by setting bit 0 in CONTROL */
     mrs ip, CONTROL
     orrs ip, ip, #1
     msr CONTROL, ip
+#endif
 #endif
 
     /* ISB is not strictly necessary here (stack pointer is not being
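After this hunk, both profiles record user mode in the thread's mode word, but only M-profile drops privilege here via CONTROL bit 0 (nPRIV); Cortex-R drops privilege later with cps #MODE_USR. A hedged C sketch of the bookkeeping, assuming a simplified stand-in for the field behind _thread_offset_to_mode:

    #include <stdint.h>

    struct thread_mode_sketch {
        uint32_t mode;      /* illustrative stand-in, not the real layout */
    };

    static inline void mark_unprivileged(struct thread_mode_sketch *t)
    {
        t->mode |= 1u;                              /* bit 0: user mode */
        __asm__ volatile("dsb" ::: "memory");       /* order the store */
    #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
        uint32_t control;
        __asm__ volatile("mrs %0, CONTROL" : "=r"(control));
        control |= 1u;                              /* CONTROL.nPRIV */
        __asm__ volatile("msr CONTROL, %0" :: "r"(control) : "memory");
    #endif
    }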
@@ -636,42 +618,10 @@ dispatch_syscall:
      */
     mov ip, r8
     orrs ip, ip, #1
-
-#endif
-
-#if defined(CONFIG_CPU_CORTEX_R)
-    /*
-     * The stack contains (from top)
-     * spsr lr lr_svc r12 r3 r2 r1 r0 lr sp r5 r4
-     * Unwind everything except the return state that will be used for rfeia.
-     */
-    add sp, sp, #(8 * 4)
-    ldmia sp!, {r12, lr}
-    pop {r2, r3}
-
-    cps #MODE_SVC
-
-    /*
-     * Restore lr_svc stored into the SVC mode stack by the mode entry
-     * function. This ensures that the return address of the interrupted
-     * context is preserved in case of interrupt nesting.
-     */
-    pop {lr}
-
-    /*
-     * Move the return state from the privileged stack to the service
-     * stack. We need to put the user stack back in $sp, but we cannot
-     * trust the user stack. Therefore, put the return state on the svc
-     * stack and return from there.
-     */
-    push {r2, r3}
-
+#elif defined(CONFIG_ARMV7_R)
     /* Restore user stack pointer */
-    ldr r1, =_kernel
-    ldr r1, [r1, #_kernel_offset_to_current]
-    cps #MODE_SYS
-    ldr sp, [r1, #_thread_offset_to_sp_usr]    /* sp_usr */
-    cps #MODE_SVC
+    ldr ip, [sp, #12]
+    mov sp, ip
 
     /* Zero out volatile (caller-saved) registers so as to not leak state from
      * kernel mode. The C calling convention for the syscall handler will
@@ -681,12 +631,15 @@ dispatch_syscall:
     mov r2, #0
     mov r3, #0
 
-    /* return from SVC state to user state. */
-    rfeia sp!
-#else
-    bx ip
+    /*
+     * Return to the original function that called SVC.
+     */
+    mov ip, r8
+    cps #MODE_USR
 #endif
 
+    bx ip
+
 
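For context on where the final bx ip lands: the handler was entered from a user-mode wrapper that issued an SVC with the call ID in r6 and the return address carried in r8, and bx ip resumes right after that SVC. A loose, hedged sketch of such a wrapper in the spirit of Zephyr's arch_syscall_invoke helpers; the SVC immediate and the clobber list here are assumptions, not copied from the headers:

    #include <stdint.h>

    static inline uintptr_t syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
    {
        register uintptr_t r0 __asm__("r0") = arg1;    /* arg in, return value out */
        register uintptr_t r6 __asm__("r6") = call_id; /* ID checked against K_SYSCALL_BAD above */

        __asm__ volatile("svc #3"                      /* assumed system-call SVC number */
                         : "=r"(r0)
                         : "r"(r0), "r"(r6)
                         : "r8", "ip", "memory");
        return r0;
    }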
 /*
  * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)