
Commit a161328

arch/arm: Reorganize arm-m interrupt exit for performance
Some nitpicky hand-optimizations, no logic changes:

+ Shrink the assembly entry to put more of the logic into compiler-optimizable C.

+ Split arm_m_must_switch() into two functions so that the first doesn't look so big to the compiler. That allows it to spill (many) fewer registers on entry and speeds the (very) common early-exit case where an interrupt returns without a context switch.

Signed-off-by: Andy Ross <[email protected]>
1 parent 101c8ee commit a161328
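The split in the second point is the performance-relevant one: keeping the early-exit test in a small function and pushing the heavyweight context-switch work behind a call means the hot path that every interrupt takes needs almost no callee-saved registers, and therefore almost no spills. A minimal, hypothetical sketch of that pattern (illustrative names and the noinline attribute are assumptions for the sketch, not the actual Zephyr code, which follows in the diff below):

#include <stdbool.h>
#include <stddef.h>

/* Slow path: many live values, so the compiler spills callee-saved
 * registers here -- but only when a switch actually happens.
 */
__attribute__((noinline)) static bool do_switch(void *next)
{
	/* ... save outgoing context, adopt 'next' ... */
	(void)next;
	return true;
}

/* Hot path: runs on every interrupt exit.  With the expensive work
 * hidden behind a call, the common "no switch" case needs almost no
 * prologue/epilogue.
 */
static bool must_switch(void *next)
{
	return (next == NULL) ? false : do_switch(next);
}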

1 file changed (+26, -19 lines)
arch/arm/core/cortex_m/arm-m-switch.c

Lines changed: 26 additions & 19 deletions
@@ -447,35 +447,44 @@ void *arm_m_new_stack(char *base, uint32_t sz, void *entry, void *arg0, void *ar
 #endif
 }
 
-bool arm_m_must_switch(uint32_t lr)
+bool arm_m_do_switch(struct k_thread *last_thread, void *next);
+
+bool arm_m_must_switch(void)
 {
+	/* This lock is held until the end of the context switch, at
+	 * which point it will be dropped unconditionally. Save a few
+	 * cycles by skipping the needless bits of arch_irq_lock().
+	 */
+	uint32_t pri = _EXC_IRQ_DEFAULT_PRIO;
+
+	__asm__ volatile("msr basepri, %0" :: "r"(pri));
+
 	/* Secure mode transistions can push a non-thread frame to the
 	 * stack. If not enabled, we already know by construction
 	 * that we're handling the bottom level of the interrupt stack
 	 * and returning to thread mode.
 	 */
 	if ((IS_ENABLED(CONFIG_ARM_SECURE_FIRMWARE) ||
 	     IS_ENABLED(CONFIG_ARM_NONSECURE_FIRMWARE))
-	    && !is_thread_return(lr)) {
+	    && !is_thread_return((uint32_t)arm_m_cs_ptrs.lr_save)) {
 		return false;
 	}
 
-	/* This lock is held until the end of the context switch, at
-	 * which point it will be dropped unconditionally. Save a few
-	 * cycles by skipping the needless bits of arch_irq_lock().
-	 */
-	uint32_t pri = _EXC_IRQ_DEFAULT_PRIO;
-
-	__asm__ volatile("msr basepri, %0" :: "r"(pri));
-
-	struct k_thread *last_thread = last_thread = _current;
-	void *last, *next = z_sched_next_handle(last_thread);
+	struct k_thread *last_thread = _current;
+	void *next = z_sched_next_handle(last_thread);
 
 	if (next == NULL) {
 		return false;
 	}
 
-	bool fpu = fpu_state_pushed(lr);
+	arm_m_do_switch(last_thread, next);
+	return true;
+}
+
+bool arm_m_do_switch(struct k_thread *last_thread, void *next)
+{
+	void *last;
+	bool fpu = fpu_state_pushed((uint32_t)arm_m_cs_ptrs.lr_save);
 
 	__asm__ volatile("mrs %0, psp" : "=r"(last));
 	last = arm_m_cpu_to_switch(last_thread, last, fpu);
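Both the early-exit test and the FPU check above now key off the EXC_RETURN value the entry stub stashed in arm_m_cs_ptrs.lr_save, since the assembly no longer passes it in as an argument. As a rough sketch of what such helpers inspect on a Cortex-M with the FP extension (hypothetical names; the real is_thread_return()/fpu_state_pushed() are defined elsewhere in this file):

#include <stdbool.h>
#include <stdint.h>

/* EXC_RETURN bit 3 is set when the exception returns to Thread mode. */
static inline bool returns_to_thread(uint32_t exc_return)
{
	return (exc_return & 0x08u) != 0u;
}

/* EXC_RETURN bit 4 (FType) is clear when the hardware pushed an
 * extended stack frame containing FP state.
 */
static inline bool pushed_fp_state(uint32_t exc_return)
{
	return (exc_return & 0x10u) == 0u;
}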
@@ -539,19 +548,17 @@ void arm_m_legacy_exit(void)
 #ifdef CONFIG_MULTITHREADING
 __asm__(".globl arm_m_exc_exit;"
 	"arm_m_exc_exit:;"
-	" ldr r2, =arm_m_cs_ptrs;"
-	" ldr r0, [r2, #8];" /* lr_save as argument */
 	" bl arm_m_must_switch;"
 	" ldr r2, =arm_m_cs_ptrs;"
-	" ldr lr, [r2, #8];" /* refetch lr_save as default lr */
+	" mov r3, #0;"
+	" ldr lr, [r2, #8];" /* lr_save */
 	" cbz r0, 1f;"
-	" ldm r2, {r0, r1};" /* fields: out, in */
 	" mov lr, #0xfffffffd;" /* integer-only LR */
+	" ldm r2, {r0, r1};" /* fields: out, in */
 	" stm r0, {r4-r11};" /* out is a switch_frame */
 	" ldm r1!, {r7-r11};" /* in is a synth_frame */
 	" ldm r1, {r4-r6};"
 	"1:\n"
-	" mov r1, #0;"
-	" msr basepri, r1;" /* release lock taken in must_switch */
+	" msr basepri, r3;" /* release lock taken in must_switch */
 	" bx lr;");
 #endif
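The "lock" referenced in the comments is just the BASEPRI mask: arm_m_must_switch() raises BASEPRI to _EXC_IRQ_DEFAULT_PRIO on entry, and the exit stub drops it unconditionally with the final "msr basepri, r3" (r3 == 0). A minimal sketch of that pair in isolation, assuming an ARMv7-M-class core (the priority value is an illustrative stand-in, not the real _EXC_IRQ_DEFAULT_PRIO):

#include <stdint.h>

#define SWITCH_LOCK_PRIO 0x20u	/* stand-in for _EXC_IRQ_DEFAULT_PRIO */

/* Take the lock: mask interrupts at or below the default priority.
 * Unlike arch_irq_lock(), the old BASEPRI is not read back, because
 * the exit path always clears it unconditionally.
 */
static inline void switch_lock(void)
{
	uint32_t pri = SWITCH_LOCK_PRIO;

	__asm__ volatile("msr basepri, %0" :: "r"(pri) : "memory");
}

/* Release the lock by writing 0 to BASEPRI, as the exit stub does. */
static inline void switch_unlock(void)
{
	uint32_t zero = 0;

	__asm__ volatile("msr basepri, %0" :: "r"(zero) : "memory");
}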
