@@ -351,7 +351,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
 	/* Restore user stack and original r0, r1 */
 	pop {r0, r1}
 
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	/* setup privileged stack */
 	ldr ip, =_kernel
 	ldr ip, [ip, #_kernel_offset_to_current]
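Note: the two loads ending this hunk are the usual Zephyr idiom for reaching the running thread: a literal load of &_kernel, then a load through the generated _kernel_offset_to_current offset. The ARMv7-R hunk below extends the same chain by one more load to pull a thread field; as a sketch (all three lines appear verbatim further down):

	ldr ip, =_kernel                          /* ip = &_kernel             */
	ldr ip, [ip, #_kernel_offset_to_current]  /* ip = current thread       */
	ldr ip, [ip, #_thread_offset_to_sp_usr]   /* ip = saved user stack ptr */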
@@ -362,16 +362,19 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
 	subs ip, #8
 	str sp, [ip, #0]
 	str lr, [ip, #4]
-#elif defined(CONFIG_CPU_CORTEX_R)
-	/* Store current LR at the beginning of the priv stack */
-	push {lr}
-#endif
-
-#if !defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_ARMV7_R)
 	/*
-	 * switch to privileged stack
-	 * The stack switch happens on exception entry for Cortex-R
+	 * The SVC handler has already switched to the privileged stack.
+	 * Store the user SP and LR at the beginning of the priv stack.
 	 */
+	ldr ip, =_kernel
+	ldr ip, [ip, #_kernel_offset_to_current]
+	ldr ip, [ip, #_thread_offset_to_sp_usr]
+	push {ip, lr}
+#endif
+
+#if !defined(CONFIG_ARMV7_R)
+	/* switch to privileged stack */
 	msr PSP, ip
 #endif
 
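Note: after the new push {ip, lr}, the top of the privileged stack has a fixed two-word layout that the restore code in later hunks can rely on. Sketched from the push above (full-descending stack, lowest register number at the lowest address):

	/* privileged stack after "push {ip, lr}" on the ARMv7-R path:
	 *   [sp, #4]  lr      - user return address for this SVC
	 *   [sp, #0]  sp_usr  - user stack pointer fetched from the thread
	 */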
@@ -446,19 +449,15 @@ dispatch_syscall:
 	mov r0, ip
 
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-	|| defined(CONFIG_CPU_CORTEX_R)
+	|| defined(CONFIG_ARMV7_R)
 	ldr ip, =K_SYSCALL_BAD
 	cmp r6, ip
 	bne valid_syscall
 
 	/* BAD SYSCALL path */
 	/* fixup stack frame on the privileged stack, adding ssf */
 	mov ip, sp
-#if defined(CONFIG_CPU_CORTEX_R)
-	push {r4, r5, ip}
-#else
 	push {r4, r5, ip, lr}
-#endif
 	b dispatch_syscall
 
 valid_syscall:
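Note: dropping the three-word Cortex-R special case means both profiles now push the identical bad-syscall frame, so the shared dispatch code can use one set of offsets. The frame built by the two instructions above (ip is copied from sp before the push, so it points at the pre-existing frame and becomes the ssf argument):

	mov ip, sp              /* ip = ssf (syscall stack frame pointer)        */
	push {r4, r5, ip, lr}   /* [sp,#0]=r4 [sp,#4]=r5 [sp,#8]=ssf [sp,#12]=lr */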
@@ -472,36 +471,13 @@ dispatch_syscall:
 	add ip, r6
 	ldr ip, [ip]	/* load table address */
 
-#if defined(CONFIG_CPU_CORTEX_R)
-	/*
-	 * We can only be in this system call handling code if interrupts were
-	 * enabled. This is because we would only come down this path if we were
-	 * actively running in user state, and user state CANNOT disable external
-	 * interrupts via irq_lock(). We want external interrupts enabled while
-	 * running the system call handler, so we can blindly enable them now, and
-	 * disable them afterwards.
-	 */
-	cpsie i
-#endif
-
 	/* execute function from dispatch table */
 	blx ip
 
-#if defined(CONFIG_CPU_CORTEX_R)
-	/*
-	 * for same reasoning as above: we now disable external interrupts.
-	 */
-	cpsid i
-
-	/* restore LR */
-	ldr lr, [sp, #12]
-#else
 	/* restore LR */
 	ldr lr, [sp, #16]
 #endif
 
-#endif
-
 
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
 	/*
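Note: with the cpsie/cpsid pair gone, the dispatch itself is identical on every profile: r6 carries the already validated syscall index as a table offset and ip walks the handler table. A sketch of the whole sequence, assuming the table base is loaded into ip just before this hunk (_k_syscall_table is Zephyr's generated handler table; that load is outside the diff):

	ldr ip, =_k_syscall_table   /* assumed: base of handler pointer table */
	add ip, r6                  /* index by scaled syscall ID             */
	ldr ip, [ip]                /* load table address                     */
	blx ip                      /* execute function from dispatch table   */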
@@ -545,9 +521,12 @@ dispatch_syscall:
 	/* Restore r0 */
 	mov r0, ip
 
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	/* set stack back to unprivileged stack */
 	ldr ip, [sp, #12]
+#endif
+
+#if !defined(CONFIG_ARMV7_R)
 	msr PSP, ip
 #endif
 
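Note: PSP is an M-profile special register, so the write-back has to stay fenced off from ARMv7-R builds; the reshuffled guards do exactly that, leaving the R-profile user stack to be restored with a plain sp write in the return path. Side by side:

	/* M profile: the unprivileged thread stack is banked in PSP */
	msr PSP, ip        /* ip = saved unprivileged stack pointer */
	/* ARMv7-R (later hunk): no PSP exists, rewrite sp directly */
	mov sp, ip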
@@ -574,18 +553,21 @@ dispatch_syscall:
 	orrs r2, r2, r3
 	msr CONTROL, r2
 	pop {r2, r3}
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+	|| defined(CONFIG_CPU_CORTEX_R)
 	ldr r0, =_kernel
 	ldr r0, [r0, #_kernel_offset_to_current]
 	ldr r1, [r0, #_thread_offset_to_mode]
 	orrs r1, r1, #1
 	/* Store (unprivileged) mode in thread's mode state variable */
 	str r1, [r0, #_thread_offset_to_mode]
 	dsb
+#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	/* drop privileges by setting bit 0 in CONTROL */
 	mrs ip, CONTROL
 	orrs ip, ip, #1
 	msr CONTROL, ip
+#endif
 #endif
 
 	/* ISB is not strictly necessary here (stack pointer is not being
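Note: both profiles now record the unprivileged state in the thread's mode variable, but the actual privilege drop stays profile-specific: M profile sets the nPRIV bit (bit 0) of CONTROL, while Cortex-R changes the CPSR mode instead (the cps #MODE_USR added in the final hunk). Side by side:

	/* M profile: unprivileged execution = CONTROL.nPRIV set */
	mrs ip, CONTROL
	orrs ip, ip, #1
	msr CONTROL, ip
	/* R profile equivalent, issued just before the final bx: */
	cps #MODE_USR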
@@ -636,42 +618,10 @@ dispatch_syscall:
 	 */
 	mov ip, r8
 	orrs ip, ip, #1
-
-#endif
-
-#if defined(CONFIG_CPU_CORTEX_R)
-	/*
-	 * The stack contains (from top)
-	 * spsr lr lr_svc r12 r3 r2 r1 r0 lr sp r5 r4
-	 * Unwind everything except the return state that will be used for rfeia.
-	 */
-	add sp, sp, #(8 * 4)
-	ldmia sp!, {r12, lr}
-	pop {r2, r3}
-
-	cps #MODE_SVC
-
-	/*
-	 * Restore lr_svc stored into the SVC mode stack by the mode entry
-	 * function. This ensures that the return address of the interrupted
-	 * context is preserved in case of interrupt nesting.
-	 */
-	pop {lr}
-
-	/*
-	 * Move the return state from the privileged stack to the service
-	 * stack. We need to put the user stack back in sp, but we cannot
-	 * trust the user stack. Therefore, put the return state on the svc
-	 * stack and return from there.
-	 */
-	push {r2, r3}
-
+#elif defined(CONFIG_ARMV7_R)
 	/* Restore user stack pointer */
-	ldr r1, =_kernel
-	ldr r1, [r1, #_kernel_offset_to_current]
-	cps #MODE_SYS
-	ldr sp, [r1, #_thread_offset_to_sp_usr]	/* sp_usr */
-	cps #MODE_SVC
+	ldr ip, [sp, #12]
+	mov sp, ip
 
 	/* Zero out volatile (caller-saved) registers so as to not leak state from
 	 * kernel mode. The C calling convention for the syscall handler will
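Note: the deleted Cortex-R epilogue returned through RFE, which consumes a two-word return-state frame and performs the branch and CPSR restore atomically; the replacement reloads sp from the frame built at entry and defers the mode change to a cps in the final hunk. For reference, the frame the old instruction consumed:

	/* rfeia sp! pops and applies a two-word frame in one instruction:
	 *   [sp, #0]  return PC
	 *   [sp, #4]  SPSR, restored into CPSR (mode, state, flags)
	 */
	rfeia sp!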
@@ -681,12 +631,15 @@ dispatch_syscall:
 	mov r2, #0
 	mov r3, #0
 
-	/* return from SVC state to user state. */
-	rfeia sp!
-#else
-	bx ip
+	/*
+	 * return back to original function that called SVC
+	 */
+	mov ip, r8
+	cps #MODE_USR
 #endif
 
+	bx ip
+
 
 /*
  * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
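Note: the new ARMv7-R exit is a plain mode switch followed by an ordinary branch: once cps #MODE_USR retires, the core is already unprivileged, so the bx lands back in the caller of the SVC without an exception return. This sketch just assembles the pieces from the hunks above, assuming the handler runs in System mode (which shares sp and lr with User mode, so the sp restored earlier carries over):

	mov ip, r8        /* r8 preserved the user return address        */
	cps #MODE_USR     /* drop privilege; USR shares sp/lr with SYS   */
	bx ip             /* back to the original function that did SVC  */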