27 changes: 13 additions & 14 deletions arch/arm/core/cortex_m/pm_s2ram.S
@@ -178,9 +178,7 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
/*
* Mark entering suspend to RAM.
*/
mov r1, lr
bl pm_s2ram_mark_set
mov lr, r1
bl pm_s2ram_mark_set

/*
* Call the system_off function passed as parameter. This should never
@@ -199,9 +197,7 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
/*
* Reset the marking of suspend to RAM, return is ignored.
*/
mov r1, lr
bl pm_s2ram_mark_check_and_clear
mov lr, r1
bl pm_s2ram_mark_check_and_clear

/* Move the stored return value of system_off back to r0,
* setting it as return value for this function.
@@ -216,15 +212,18 @@ GTEXT(arch_pm_s2ram_resume)
SECTION_FUNC(TEXT, arch_pm_s2ram_resume)
/*
* Check if reset occurred after suspending to RAM.
* Save LR so that boot can continue when we did not suspend to RAM.
* R0 is pushed as well so that "SP mod 8 = 0" holds, as required by
* AAPCS32 rule 6.2.1.2.
*/
mov r1, lr
bl pm_s2ram_mark_check_and_clear
mov lr, r1
cmp r0, #0x1
beq resume
bx lr

resume:
push {r0, lr}
bl pm_s2ram_mark_check_and_clear
cmp r0, #0x1
pop {r0, lr}
beq .L_resume
bx lr

.L_resume:
/*
* Restore the CPU context
*/
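For reference, the control flow that arch_pm_s2ram_suspend wraps around these marker calls can be sketched in plain C roughly as follows; the _flow name is illustrative, and the real routine additionally saves and restores the CPU context in assembly, which C cannot express.

#include <zephyr/arch/common/pm_s2ram.h>

/* Rough, illustrative C rendering of the suspend-side marker handling. */
static int pm_s2ram_suspend_flow(pm_s2ram_system_off_fn_t system_off)
{
	int ret;

	/* Mark entering suspend to RAM. */
	pm_s2ram_mark_set();

	/* Power off; on success this call never returns. */
	ret = system_off();

	/* Powering off failed: clear the marker (result ignored) and
	 * hand system_off's error code back to the caller.
	 */
	(void)pm_s2ram_mark_check_and_clear();

	return ret;
}

Because the marker helpers are now ordinary C functions with a usable stack, the R1-based LR juggling needed by the old naked implementations is gone; where LR still has to be preserved (the resume path), it is simply pushed to the stack.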
40 changes: 8 additions & 32 deletions arch/arm/core/cortex_m/pm_s2ram.c
@@ -22,44 +22,20 @@ __noinit _cpu_context_t _cpu_context;
*/
static __noinit uint32_t marker;

void __attribute__((naked)) pm_s2ram_mark_set(void)
void pm_s2ram_mark_set(void)
{
__asm__ volatile(
/* Set the marker to MAGIC value */
"str %[_magic_val], [%[_marker]]\n"

"bx lr\n"
:
: [_magic_val] "r"(MAGIC), [_marker] "r"(&marker)
: "r1", "r4", "memory");
marker = MAGIC;
}

bool __attribute__((naked)) pm_s2ram_mark_check_and_clear(void)
bool pm_s2ram_mark_check_and_clear(void)
{
__asm__ volatile(
/* Set return value to 0 */
"mov r0, #0\n"

/* Check the marker */
"ldr r3, [%[_marker]]\n"
"cmp r3, %[_magic_val]\n"
"bne exit\n"

/*
* Reset the marker
*/
"str r0, [%[_marker]]\n"
if (marker == MAGIC) {
marker = 0;

/*
* Set return value to 1
*/
"mov r0, #1\n"
return true;
}

"exit:\n"
"bx lr\n"
:
: [_magic_val] "r"(MAGIC), [_marker] "r"(&marker)
: "r0", "r1", "r3", "r4", "memory");
return false;
}

#endif /* CONFIG_PM_S2RAM_CUSTOM_MARKING */
58 changes: 48 additions & 10 deletions arch/arm/core/cortex_m/reset.S
@@ -21,6 +21,7 @@ _ASM_FILE_PROLOGUE
GTEXT(z_arm_reset)
GTEXT(z_early_memset)
GDATA(z_interrupt_stacks)
GDATA(z_main_stack)
#if defined(CONFIG_DEBUG_THREAD_INFO)
GDATA(z_sys_post_kernel)
#endif
@@ -29,7 +30,6 @@ GTEXT(soc_reset_hook)
#endif
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
GTEXT(z_arm_init_arch_hw_at_boot)
GDATA(z_main_stack)
#endif
#if defined(CONFIG_PM_S2RAM)
GTEXT(arch_pm_s2ram_resume)
@@ -68,13 +68,6 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,z_arm_reset)
*/
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

#if defined(CONFIG_DEBUG_THREAD_INFO)
/* Clear z_sys_post_kernel flag for RTOS aware debuggers */
movs.n r0, #0
ldr r1, =z_sys_post_kernel
strb r0, [r1]
#endif /* CONFIG_DEBUG_THREAD_INFO */

#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
/* Reset CONTROL register */
movs.n r0, #0
@@ -90,9 +83,56 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
#endif /* CONFIG_INIT_ARCH_HW_AT_BOOT */

#if defined(CONFIG_PM_S2RAM)
/*
* Temporarily set MSP to the interrupt stack so that arch_pm_s2ram_resume
* has a stack available when calling pm_s2ram_mark_check_and_clear.
* This is safe because suspend is never entered from an ISR, so the
* interrupt stack was not live during suspend.
* Note:
* - if resuming from suspend, MSP is restored from the saved CPU context
* - if not resuming, MSP is set back to z_main_stack for proper init
* Apart from arch_pm_s2ram_resume briefly using the interrupt stack,
* behavior is unchanged on either path.
*/
ldr r0, =z_interrupt_stacks + CONFIG_ISR_STACK_SIZE + MPU_GUARD_ALIGN_AND_SIZE
msr msp, r0

bl arch_pm_s2ram_resume
#endif /* CONFIG_PM_S2RAM */

/*
* MSP needs to be set to the main stack in the following scenarios:
* - If CONFIG_PM_S2RAM is enabled, MSP was set to z_interrupt_stacks so
* the resume functions have a stack to use. However, if the device was
* not suspended and the resume functions return, MSP needs to be set
* back to z_main_stack to ensure proper initialization.
* - If CONFIG_PM_S2RAM is not enabled but CONFIG_INIT_ARCH_HW_AT_BOOT is,
* MSP needs to be set to z_main_stack for proper initialization in case
* the device was loaded through chain loading or a debugger, as MSP
* would otherwise hold whatever the previous image left there.
* - If CONFIG_INIT_STACKS is enabled, MSP must not be left pointing at
* z_interrupt_stacks, so we set it to z_main_stack.
*
* Since these scenarios cover most of the cases, we set MSP to
* z_main_stack here.
*/
ldr r0, =z_main_stack + CONFIG_MAIN_STACK_SIZE
msr msp, r0

/* Note: Make sure that variables like z_sys_post_kernel
* are set after the call to arch_pm_s2ram_resume
* to avoid any issues with the suspend/resume path.
* Refer to issue #83660 for more details.
*/
#if defined(CONFIG_DEBUG_THREAD_INFO)
/* Clear z_sys_post_kernel flag for RTOS aware debuggers */
movs.n r0, #0
ldr r1, =z_sys_post_kernel
strb r0, [r1]
#endif /* CONFIG_DEBUG_THREAD_INFO */

#if defined(CONFIG_SOC_RESET_HOOK)
bl soc_reset_hook
#endif
@@ -105,8 +145,6 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
str r0, [r1]
dsb
#endif /* CONFIG_CPU_HAS_ARM_MPU */
ldr r0, =z_main_stack + CONFIG_MAIN_STACK_SIZE
msr msp, r0

/* Initialize core architecture registers and system blocks */
bl z_arm_init_arch_hw_at_boot
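The comment blocks above describe the boot-time decision that reset.S and arch_pm_s2ram_resume implement between them. A minimal C sketch of that decision follows; the function name and the empty branches are illustrative only, since the real code manipulates MSP and the saved CPU context directly in assembly.

#include <stdbool.h>
#include <zephyr/arch/common/pm_s2ram.h>

/* Illustrative only: mirrors the decision made early in the reset path. */
void reset_path_sketch(void)
{
	/* reset.S first points MSP at the top of z_interrupt_stacks so the
	 * call below has a usable stack.
	 */
	if (pm_s2ram_mark_check_and_clear()) {
		/* Marker found: restore the saved CPU context (MSP included)
		 * and resume where arch_pm_s2ram_suspend left off.
		 */
	} else {
		/* No marker: normal boot; MSP is switched to z_main_stack and
		 * architecture/kernel initialization proceeds as usual.
		 */
	}
}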
38 changes: 19 additions & 19 deletions arch/arm/core/cortex_m/swap_helper.S
@@ -98,7 +98,7 @@ SECTION_FUNC(TEXT, z_arm_pendsv)
#ifdef CONFIG_FPU_SHARING
/* Assess whether switched-out thread had been using the FP registers. */
tst lr, #_EXC_RETURN_FTYPE_Msk
bne out_fp_endif
bne .L_out_fp_endif

/* FP context active: set FP state and store callee-saved registers.
* Note: if Lazy FP stacking is enabled, storing the callee-saved
@@ -108,7 +108,7 @@ SECTION_FUNC(TEXT, z_arm_pendsv)
add r0, r2, #_thread_offset_to_preempt_float
vstmia r0, {s16-s31}

out_fp_endif:
.L_out_fp_endif:
/* At this point FPCCR.LSPACT is guaranteed to be cleared,
* regardless of whether the thread has an active FP context.
*/
@@ -204,9 +204,9 @@ out_fp_endif:
* were enabled before irq_lock was called.
*/
cmp r0, #0
bne _thread_irq_disabled
bne .L_thread_irq_disabled
cpsie i
_thread_irq_disabled:
.L_thread_irq_disabled:

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* Re-program dynamic memory map */
@@ -259,25 +259,25 @@ _thread_irq_disabled:
#ifdef CONFIG_FPU_SHARING
/* Assess whether switched-in thread had been using the FP registers. */
tst lr, #_EXC_RETURN_FTYPE_Msk
beq in_fp_active
beq .L_in_fp_active
/* FP context inactive for swapped-in thread:
* - reset FPSCR to 0
* - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
* from pendSV)
*/
movs.n r3, #0
vmsr fpscr, r3
b in_fp_endif
b .L_in_fp_endif

in_fp_active:
.L_in_fp_active:
/* FP context active:
* - clear EXC_RETURN.F_Type
* - FPSCR and caller-saved registers will be restored automatically
* - restore callee-saved FP registers
*/
add r0, r2, #_thread_offset_to_preempt_float
vldmia r0, {s16-s31}
in_fp_endif:
.L_in_fp_endif:
/* Clear CONTROL.FPCA that may have been set by FP instructions */
mrs r3, CONTROL
bic r3, #_CONTROL_FPCA_Msk
@@ -361,12 +361,12 @@ SECTION_FUNC(TEXT, z_arm_svc)
movs r0, #_EXC_RETURN_SPSEL_Msk
mov r1, lr
tst r1, r0
beq _stack_frame_msp
beq .L_stack_frame_msp
mrs r0, PSP
bne _stack_frame_endif
_stack_frame_msp:
bne .L_stack_frame_endif
.L_stack_frame_msp:
mrs r0, MSP
_stack_frame_endif:
.L_stack_frame_endif:
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
tst lr, #_EXC_RETURN_SPSEL_Msk /* did we come from thread mode ? */
ite eq /* if zero (equal), came from handler mode */
@@ -399,7 +399,7 @@ _stack_frame_endif:
mrs r2, CONTROL

cmp r1, #3
beq _do_syscall
beq .L_do_syscall

/*
* check that we are privileged before invoking other SVCs
@@ -411,12 +411,12 @@
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
tst r2, #0x1
#endif
bne _oops
bne .L_oops

#endif /* CONFIG_USERSPACE */

cmp r1, #2
beq _oops
beq .L_oops

#if defined(CONFIG_IRQ_OFFLOAD)
push {r0, lr}
@@ -434,7 +434,7 @@

#endif

_oops:
.L_oops:
push {r0, lr}
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@@ -484,7 +484,7 @@
* r6 - call_id
* r8 - saved link register
*/
_do_syscall:
.L_do_syscall:
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
movs r3, #24
ldr r1, [r0, r3] /* grab address of PC from stack frame */
@@ -510,15 +510,15 @@ _do_syscall:
/* The supplied syscall_id must be lower than the limit
* (Requires unsigned integer comparison)
*/
blo valid_syscall_id
blo .L_valid_syscall_id

/* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
str r6, [r0]
ldr r6, =K_SYSCALL_BAD

/* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */

valid_syscall_id:
.L_valid_syscall_id:
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
10 changes: 0 additions & 10 deletions include/zephyr/arch/common/pm_s2ram.h
@@ -65,11 +65,6 @@ int arch_pm_s2ram_suspend(pm_s2ram_system_off_fn_t system_off);
*
* Default implementation is setting a magic word in RAM. CONFIG_PM_S2RAM_CUSTOM_MARKING
* allows custom implementation.
* The following requirements must be fulfilled:
* - the function cannot use stack (asm function or function with 'naked' attribute)
* - the content of the R1 and R4 registers must remain unchanged
* - returning from the function should be performed with the `bx lr` instruction
*
*/
void pm_s2ram_mark_set(void);

@@ -81,11 +76,6 @@ void pm_s2ram_mark_set(void);
*
* Default implementation is checking a magic word in RAM. CONFIG_PM_S2RAM_CUSTOM_MARKING
* allows custom implementation.
* The following requirements must be fulfilled:
* - the function cannot use stack (most likely asm function)
* - the content of the R1 and R4 registers must remain unchanged
* - the function's return value is passed by R0 register
* - returning from the function should be performed with the `bx lr` instruction
*
* @retval true if marking is found which indicates resuming after suspend-to-RAM.
* @retval false if marking is not found which indicates standard boot.
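With the naked-function and register-preservation requirements dropped from the documentation above, a CONFIG_PM_S2RAM_CUSTOM_MARKING implementation can now be ordinary C. A minimal sketch, assuming a __noinit word in RAM that is retained across suspend-to-RAM; the variable name and magic value are illustrative, not from the tree.

#include <stdbool.h>
#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/common/pm_s2ram.h>

#define MY_S2RAM_MAGIC 0x53325241U /* illustrative magic value ("S2RA") */

/* Marker kept in noinit RAM so it is neither zeroed at boot nor lost
 * while RAM is retained during suspend-to-RAM.
 */
static __noinit uint32_t my_s2ram_marker;

void pm_s2ram_mark_set(void)
{
	my_s2ram_marker = MY_S2RAM_MAGIC;
}

bool pm_s2ram_mark_check_and_clear(void)
{
	if (my_s2ram_marker == MY_S2RAM_MAGIC) {
		my_s2ram_marker = 0;
		return true;
	}
	return false;
}

A real port would more likely back the marker with a retained/backup register or a dedicated retained-RAM section, but the structure stays the same now that both hooks may freely use the stack.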