
Commit e646b7f

nashif authored and mmahadevan108 committed

Revert "arch: arm: cortex_m: move part of swap_helper to C"

This reverts commit 773739a.

Fixes #80701

Signed-off-by: Anas Nashif <[email protected]>

1 parent 7aa4032 · commit e646b7f

File tree

2 files changed (+140, -67)


arch/arm/core/cortex_m/swap.c (0 additions, 57 deletions)

@@ -1,6 +1,5 @@
 /*
  * Copyright (c) 2018 Linaro, Limited
- * Copyright (c) 2023 Arm Limited
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -48,59 +47,3 @@ int arch_swap(unsigned int key)
 	 */
 	return _current->arch.swap_return_value;
 }
-
-uintptr_t z_arm_pendsv_c(uintptr_t exc_ret)
-{
-	/* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
-	IF_ENABLED(CONFIG_ARM_STORE_EXC_RETURN,
-		   (_kernel.cpus[0].current->arch.mode_exc_return = (uint8_t)exc_ret;));
-
-	/* Protect the kernel state while we play with the thread lists */
-	uint32_t basepri = arch_irq_lock();
-
-	/* fetch the thread to run from the ready queue cache */
-	struct k_thread *current = _kernel.cpus[0].current = _kernel.ready_q.cache;
-
-	/*
-	 * Clear PendSV so that if another interrupt comes in and
-	 * decides, with the new kernel state based on the new thread
-	 * being context-switched in, that it needs to reschedule, it
-	 * will take, but that previously pended PendSVs do not take,
-	 * since they were based on the previous kernel state and this
-	 * has been handled.
-	 */
-	SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
-
-	/* For Cortex-M, store TLS pointer in a global variable,
-	 * as it lacks the process ID or thread ID register
-	 * to be used by toolchain to access thread data.
-	 */
-	IF_ENABLED(CONFIG_THREAD_LOCAL_STORAGE,
-		   (extern uintptr_t z_arm_tls_ptr; z_arm_tls_ptr = current->tls));
-
-	IF_ENABLED(CONFIG_ARM_STORE_EXC_RETURN,
-		   (exc_ret = (exc_ret & 0xFFFFFF00) | current->arch.mode_exc_return));
-
-	/* Restore previous interrupt disable state (irq_lock key)
-	 * (We clear the arch.basepri field after restoring state)
-	 */
-	basepri = current->arch.basepri;
-	current->arch.basepri = 0;
-
-	arch_irq_unlock(basepri);
-
-#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
-	/* Re-program dynamic memory map */
-	z_arm_configure_dynamic_mpu_regions(current);
-#endif
-
-	/* restore mode */
-	IF_ENABLED(CONFIG_USERSPACE, ({
-		CONTROL_Type ctrl = {.w = __get_CONTROL()};
-		/* exit privileged state when returing to thread mode. */
-		ctrl.b.nPRIV = 0;
-		__set_CONTROL(ctrl.w | current->arch.mode);
-	}));
-
-	return exc_ret;
-}
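
Note: the deleted z_arm_pendsv_c() above is where the incoming thread's irq_lock key was restored and the arch.basepri field cleared; the assembly reinstated below repeats that pattern per architecture variant. A minimal standalone model of the key handling (hypothetical thread type and a plain variable standing in for BASEPRI; not Zephyr code):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for struct k_thread and the BASEPRI register. */
	struct thread { uint32_t basepri; };
	static uint32_t hw_basepri;

	static void restore_irq_state(struct thread *incoming)
	{
		/* Read the incoming thread's saved irq_lock key, then clear
		 * the field so a stale key can never be restored twice. */
		uint32_t key = incoming->basepri;

		incoming->basepri = 0;
		hw_basepri = key; /* arch_irq_unlock(key) on real hardware */
	}

	int main(void)
	{
		struct thread t = { .basepri = 0x20 };

		restore_irq_state(&t);
		printf("BASEPRI=0x%02x saved=0x%02x\n",
		       (unsigned)hw_basepri, (unsigned)t.basepri);
		return 0;
	}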

arch/arm/core/cortex_m/swap_helper.S (140 additions, 10 deletions)

@@ -27,7 +27,6 @@ _ASM_FILE_PROLOGUE
 GTEXT(z_arm_svc)
 GTEXT(z_arm_pendsv)
 GTEXT(z_do_kernel_oops)
-GTEXT(z_arm_pendsv_c)
 #if defined(CONFIG_USERSPACE)
 GTEXT(z_arm_do_syscall)
 #endif
@@ -118,20 +117,125 @@ out_fp_endif:
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
 
-	mov r4, lr
-	mov r0, lr
-	bl z_arm_pendsv_c
-	mov lr, r4
+	/* Protect the kernel state while we play with the thread lists */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+	cpsid i
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
+	msr BASEPRI_MAX, r0
+	isb /* Make the effect of disabling interrupts be realized immediately */
+#else
+#error Unknown ARM architecture
+#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
 
-	ldr r1, =_kernel
-	ldr r2, [r1, #_kernel_offset_to_current]
+	/*
+	 * Prepare to clear PendSV with interrupts unlocked, but
+	 * don't clear it yet. PendSV must not be cleared until
+	 * the new thread is context-switched in since all decisions
+	 * to pend PendSV have been taken with the current kernel
+	 * state and this is what we're handling currently.
+	 */
+	ldr r7, =_SCS_ICSR
+	ldr r6, =_SCS_ICSR_UNPENDSV
+
+	/* _kernel is still in r1 */
+
+	/* fetch the thread to run from the ready queue cache */
+	ldr r2, [r1, #_kernel_offset_to_ready_q_cache]
+
+	str r2, [r1, #_kernel_offset_to_current]
+
+	/*
+	 * Clear PendSV so that if another interrupt comes in and
+	 * decides, with the new kernel state based on the new thread
+	 * being context-switched in, that it needs to reschedule, it
+	 * will take, but that previously pended PendSVs do not take,
+	 * since they were based on the previous kernel state and this
+	 * has been handled.
+	 */
+
+	/* _SCS_ICSR is still in r7 and _SCS_ICSR_UNPENDSV in r6 */
+	str r6, [r7, #0]
+
+#if defined(CONFIG_THREAD_LOCAL_STORAGE)
+	/* Grab the TLS pointer */
+	ldr r4, =_thread_offset_to_tls
+	adds r4, r2, r4
+	ldr r0, [r4]
+
+	/* For Cortex-M, store TLS pointer in a global variable,
+	 * as it lacks the process ID or thread ID register
+	 * to be used by toolchain to access thread data.
+	 */
+	ldr r4, =z_arm_tls_ptr
+	str r0, [r4]
+#endif
 
 #if defined(CONFIG_ARM_STORE_EXC_RETURN)
 	/* Restore EXC_RETURN value. */
-	mov lr, r0
+	ldrsb lr, [r2, #_thread_offset_to_mode_exc_return]
+#endif
+
+	/* Restore previous interrupt disable state (irq_lock key)
+	 * (We clear the arch.basepri field after restoring state)
+	 */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && (_thread_offset_to_basepri > 124)
+	/* Doing it this way since the offset to thread->arch.basepri can in
+	 * some configurations be larger than the maximum of 124 for ldr/str
+	 * immediate offsets.
+	 */
+	ldr r4, =_thread_offset_to_basepri
+	adds r4, r2, r4
+
+	ldr r0, [r4]
+	movs.n r3, #0
+	str r3, [r4]
+#else
+	ldr r0, [r2, #_thread_offset_to_basepri]
+	movs r3, #0
+	str r3, [r2, #_thread_offset_to_basepri]
 #endif
 
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+	/* BASEPRI not available, previous interrupt disable state
+	 * maps to PRIMASK.
+	 *
+	 * Only enable interrupts if value is 0, meaning interrupts
+	 * were enabled before irq_lock was called.
+	 */
+	cmp r0, #0
+	bne _thread_irq_disabled
+	cpsie i
+_thread_irq_disabled:
+
+#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
+	/* Re-program dynamic memory map */
+	push {r2,lr}
+	mov r0, r2
+	bl z_arm_configure_dynamic_mpu_regions
+	pop {r2,r3}
+	mov lr, r3
+#endif
+
+#ifdef CONFIG_USERSPACE
+	/* restore mode */
+	ldr r3, =_thread_offset_to_mode
+	adds r3, r2, r3
+	ldr r0, [r3]
+	mrs r3, CONTROL
+	movs.n r1, #1
+	bics r3, r1
+	orrs r3, r0
+	msr CONTROL, r3
+
+	/* ISB is not strictly necessary here (stack pointer is not being
+	 * touched), but it's recommended to avoid executing pre-fetched
+	 * instructions with the previous privilege.
+	 */
+	isb
+
+#endif
+
 	ldr r4, =_thread_offset_to_callee_saved
 	adds r0, r2, r4
 
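Note: the ldr r7, =_SCS_ICSR / ldr r6, =_SCS_ICSR_UNPENDSV loads and the later str r6, [r7, #0] in the hunk above re-implement in assembly the single CMSIS write the deleted C helper performed. A sketch, assuming Zephyr's <cmsis_core.h> wrapper for the CMSIS core header and a hypothetical helper name:

	#include <cmsis_core.h>

	/* Unpend PendSV: any PendSV pended against the previous kernel
	 * state has now been handled by this context switch. */
	static inline void unpend_pendsv(void)
	{
		SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
	}
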
@@ -149,6 +253,9 @@ out_fp_endif:
 	subs r0, #36
 	ldmia r0!, {r4-r7}
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+	/* restore BASEPRI for the incoming thread */
+	msr BASEPRI, r0
+
 #ifdef CONFIG_FPU_SHARING
 	/* Assess whether switched-in thread had been using the FP registers. */
 	tst lr, #_EXC_RETURN_FTYPE_Msk
@@ -178,6 +285,30 @@ in_fp_endif:
 	isb
 #endif
 
+#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
+	/* Re-program dynamic memory map */
+	push {r2,lr}
+	mov r0, r2 /* _current thread */
+	bl z_arm_configure_dynamic_mpu_regions
+	pop {r2,lr}
+#endif
+
+#ifdef CONFIG_USERSPACE
+	/* restore mode */
+	ldr r0, [r2, #_thread_offset_to_mode]
+	mrs r3, CONTROL
+	bic r3, #1
+	orr r3, r0
+	msr CONTROL, r3
+
+	/* ISB is not strictly necessary here (stack pointer is not being
+	 * touched), but it's recommended to avoid executing pre-fetched
+	 * instructions with the previous privilege.
+	 */
+	isb
+
+#endif
+
 	/* load callee-saved + psp from thread */
 	add r0, r2, #_thread_offset_to_callee_saved
 	ldmia r0, {r4-r11, ip}
@@ -298,8 +429,7 @@ _stack_frame_endif:
 #endif
 
 	/* exception return is done in z_arm_int_exit() */
-	ldr r0, =z_arm_int_exit
-	bx r0
+	b z_arm_int_exit
 #endif
 
 _oops:
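
Note: both restored "restore mode" sequences (mrs/bics/orrs/msr on Baseline, mrs/bic/orr/msr on Mainline) perform the CONTROL update that the deleted C helper expressed with CMSIS intrinsics. A sketch, again assuming <cmsis_core.h> and a hypothetical helper; 'mode' stands for the thread's saved arch.mode word:

	#include <cmsis_core.h>

	/* Clear nPRIV (bit 0), OR back the thread's saved CONTROL bits,
	 * then flush the pipeline so no prefetched instruction executes
	 * with the old privilege level. */
	static inline void restore_thread_mode(uint32_t mode)
	{
		uint32_t ctrl = __get_CONTROL();

		ctrl &= ~CONTROL_nPRIV_Msk;
		ctrl |= mode;
		__set_CONTROL(ctrl);
		__ISB();
	}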
