@@ -27,7 +27,6 @@ _ASM_FILE_PROLOGUE
27
27
GTEXT(z_arm_svc)
28
28
GTEXT(z_arm_pendsv)
29
29
GTEXT(z_do_kernel_oops)
30
- GTEXT(z_arm_pendsv_c)
31
30
#if defined(CONFIG_USERSPACE)
32
31
GTEXT(z_arm_do_syscall)
33
32
#endif
@@ -118,20 +117,125 @@ out_fp_endif:
118
117
#error Unknown ARM architecture
119
118
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
120
119
121
- mov r4 , lr
122
- mov r0 , lr
123
- bl z_arm_pendsv_c
124
- mov lr , r4
120
+ /* Protect the kernel state while we play with the thread lists */
121
+ #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
122
+ cpsid i
123
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
124
+ movs .n r0 , #_EXC_IRQ_DEFAULT_PRIO
125
+ msr BASEPRI_MAX , r0
126
+ isb /* Make the effect of disabling interrupts be realized immediately */
127
+ #else
128
+ #error Unknown ARM architecture
129
+ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
125
130
126
- ldr r1 , =_kernel
127
- ldr r2 , [ r1 , #_kernel_offset_to_current ]
131
+ /*
132
+ * Prepare to clear PendSV with interrupts unlocked, but
133
+ * don't clear it yet. PendSV must not be cleared until
134
+ * the new thread is context-switched in since all decisions
135
+ * to pend PendSV have been taken with the current kernel
136
+ * state and this is what we're handling currently.
137
+ */
138
+ ldr r7 , =_SCS_ICSR
139
+ ldr r6 , =_SCS_ICSR_UNPENDSV
140
+
141
+ /* _kernel is still in r1 */
142
+
143
+ /* fetch the thread to run from the ready queue cache */
144
+ ldr r2 , [ r1 , #_kernel_offset_to_ready_q_cache ]
145
+
146
+ str r2 , [ r1 , #_kernel_offset_to_current ]
147
+
148
+ /*
149
+ * Clear PendSV so that if another interrupt comes in and
150
+ * decides, with the new kernel state based on the new thread
151
+ * being context-switched in, that it needs to reschedule, it
152
+ * will take, but that previously pended PendSVs do not take,
153
+ * since they were based on the previous kernel state and this
154
+ * has been handled.
155
+ */
156
+
157
+ /* _SCS_ICSR is still in r7 and _SCS_ICSR_UNPENDSV in r6 */
158
+ str r6 , [ r7 , # 0 ]
159
+
160
+ #if defined(CONFIG_THREAD_LOCAL_STORAGE)
161
+ /* Grab the TLS pointer */
162
+ ldr r4 , =_thread_offset_to_tls
163
+ adds r4 , r2 , r4
164
+ ldr r0 , [ r4 ]
165
+
166
+ /* For Cortex-M, store TLS pointer in a global variable,
167
+ * as it lacks the process ID or thread ID register
168
+ * to be used by toolchain to access thread data.
169
+ */
170
+ ldr r4 , =z_arm_tls_ptr
171
+ str r0 , [ r4 ]
172
+ #endif
128
173
129
174
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
130
175
/* Restore EXC_RETURN value. */
131
- mov lr , r0
176
+ ldrsb lr , [ r2 , #_thread_offset_to_mode_exc_return ]
177
+ #endif
178
+
179
+ /* Restore previous interrupt disable state (irq_lock key)
180
+ * (We clear the arch.basepri field after restoring state)
181
+ */
182
+ #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && (_thread_offset_to_basepri > 124 )
183
+ /* Doing it this way since the offset to thread->arch.basepri can in
184
+ * some configurations be larger than the maximum of 124 for ldr/str
185
+ * immediate offsets.
186
+ */
187
+ ldr r4 , =_thread_offset_to_basepri
188
+ adds r4 , r2 , r4
189
+
190
+ ldr r0 , [ r4 ]
191
+ movs .n r3 , # 0
192
+ str r3 , [ r4 ]
193
+ #else
194
+ ldr r0 , [ r2 , #_thread_offset_to_basepri ]
195
+ movs r3 , # 0
196
+ str r3 , [ r2 , #_thread_offset_to_basepri ]
132
197
#endif
133
198
134
199
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
200
+ /* BASEPRI not available, previous interrupt disable state
201
+ * maps to PRIMASK.
202
+ *
203
+ * Only enable interrupts if value is 0, meaning interrupts
204
+ * were enabled before irq_lock was called.
205
+ */
206
+ cmp r0 , # 0
207
+ bne _thread_irq_disabled
208
+ cpsie i
209
+ _thread_irq_disabled:
210
+
211
+ #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
212
+ /* Re-program dynamic memory map */
213
+ push {r2 , lr}
214
+ mov r0 , r2
215
+ bl z_arm_configure_dynamic_mpu_regions
216
+ pop {r2 , r3}
217
+ mov lr , r3
218
+ #endif
219
+
220
+ #ifdef CONFIG_USERSPACE
221
+ /* restore mode */
222
+ ldr r3 , =_thread_offset_to_mode
223
+ adds r3 , r2 , r3
224
+ ldr r0 , [ r3 ]
225
+ mrs r3 , CONTROL
226
+ movs .n r1 , # 1
227
+ bics r3 , r1
228
+ orrs r3 , r0
229
+ msr CONTROL , r3
230
+
231
+ /* ISB is not strictly necessary here (stack pointer is not being
232
+ * touched), but it's recommended to avoid executing pre-fetched
233
+ * instructions with the previous privilege.
234
+ */
235
+ isb
236
+
237
+ #endif
238
+
135
239
ldr r4 , =_thread_offset_to_callee_saved
136
240
adds r0 , r2 , r4
137
241
@@ -149,6 +253,9 @@ out_fp_endif:
149
253
subs r0 , # 36
150
254
ldmia r0! , {r4 - r7}
151
255
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
256
+ /* restore BASEPRI for the incoming thread */
257
+ msr BASEPRI , r0
258
+
152
259
#ifdef CONFIG_FPU_SHARING
153
260
/ * Assess whether switched - in thread had been using the FP registers. * /
154
261
tst lr , #_EXC_RETURN_FTYPE_Msk
@@ -178,6 +285,30 @@ in_fp_endif:
178
285
isb
179
286
#endif
180
287
288
+ #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
289
+ /* Re-program dynamic memory map */
290
+ push {r2 , lr}
291
+ mov r0 , r2 /* _current thread */
292
+ bl z_arm_configure_dynamic_mpu_regions
293
+ pop {r2 , lr}
294
+ #endif
295
+
296
+ #ifdef CONFIG_USERSPACE
297
+ /* restore mode */
298
+ ldr r0 , [ r2 , #_thread_offset_to_mode ]
299
+ mrs r3 , CONTROL
300
+ bic r3 , # 1
301
+ orr r3 , r0
302
+ msr CONTROL , r3
303
+
304
+ /* ISB is not strictly necessary here (stack pointer is not being
305
+ * touched), but it's recommended to avoid executing pre-fetched
306
+ * instructions with the previous privilege.
307
+ */
308
+ isb
309
+
310
+ #endif
311
+
181
312
/* load callee-saved + psp from thread */
182
313
add r0 , r2 , #_thread_offset_to_callee_saved
183
314
ldmia r0 , {r4 - r11 , ip }
@@ -298,8 +429,7 @@ _stack_frame_endif:
298
429
#endif
299
430
300
431
/* exception return is done in z_arm_int_exit() */
301
- ldr r0 , =z_arm_int_exit
302
- bx r0
432
+ b z_arm_int_exit
303
433
#endif
304
434
305
435
_oops:
0 commit comments