/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support context switch load/store FPU register.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support lazy stack optimization.
 * 2018-07-24     aozima       enhance hard fault exception handler.
 */

/**
 * @addtogroup cortex-m4
 */
/*@{*/

.cpu cortex-m4
.syntax unified
.thumb
.text

.equ    SCB_VTOR,        0xE000ED08    /* Vector Table Offset Register */
.equ    NVIC_INT_CTRL,   0xE000ED04    /* interrupt control state register */
.equ    NVIC_SYSPRI2,    0xE000ED20    /* system priority register (2) */
.equ    NVIC_PENDSV_PRI, 0x00FF0000    /* PendSV priority value (lowest) */
.equ    NVIC_PENDSVSET,  0x10000000    /* value to trigger PendSV exception */

/*
 * rt_base_t rt_hw_interrupt_disable();
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     r0, PRIMASK
    CPSID   I
    BX      LR

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, r0
    BX      LR

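/*
 * Typical C-level usage of the pair above (a sketch of RT-Thread's
 * critical-section idiom; the variable name is illustrative):
 *
 *     rt_base_t level = rt_hw_interrupt_disable();
 *     ... critical section ...
 *     rt_hw_interrupt_enable(level);
 *
 * Saving and restoring PRIMASK this way keeps critical sections nestable.
 */
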
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function

rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     r0, [r2]

_reswitch:
    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR

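/*
 * Note: the 'from' and 'to' arguments are expected to be the addresses of the
 * threads' saved stack-pointer fields (e.g. &thread->sp in RT-Thread's thread
 * control block; the field name is shown for illustration). PendSV_Handler
 * dereferences them below to store and reload the PSP.
 */
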
/* r0 --> switch from thread stack
 * r1 --> switch to thread stack
 * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
 */
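/*
 * Full layout of a saved thread stack, from the stored stack pointer upwards
 * (as built by contex_ns_store below; the FPU registers are present only when
 * the thread has used the FPU):
 *   tz_context, exc_return, psplim, control,
 *   r4 - r11,
 *   [d8 - d15],
 *   r0, r1, r2, r3, r12, lr, pc, xpsr    (hardware-stacked exception frame)
 */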
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS     r2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR     r0, =rt_thread_switch_interrupt_flag   /* r0 = &rt_thread_switch_interrupt_flag */
    LDR     r1, [r0]                    /* r1 = *r0 */
    CMP     r1, #0x00                   /* compare r1 == 0x00 */
    BNE     schedule
    MSR     PRIMASK, r2                 /* if r1 == 0x00, do msr PRIMASK, r2 */
    BX      lr                          /* if r1 == 0x00, do bx lr */

schedule:
    PUSH    {r2}                        /* store interrupt state */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV     r1, #0x00                   /* r1 = 0x00 */
    STR     r1, [r0]                    /* *r0 = r1 */

    /* skip register save at the first time */
    LDR     r0, =rt_interrupt_from_thread   /* r0 = &rt_interrupt_from_thread */
    LDR     r1, [r0]                    /* r1 = *r0 */
    CBZ     r1, switch_to_thread        /* if r1 == 0, goto switch_to_thread */

    /* whether a TrustZone thread context exists */
    LDR     r1, =rt_trustzone_current_context   /* r1 = &rt_trustzone_current_context */
    LDR     r1, [r1]                    /* r1 = *r1 */
    CBZ     r1, contex_ns_store         /* if r1 == 0, goto contex_ns_store */

    /* call TrustZone function, save the TrustZone stack */
    STMFD   sp!, {r0-r1, lr}            /* push registers */
    MOV     r0, r1                      /* r0 = rt_trustzone_current_context */
    BL      rt_trustzone_context_store  /* call TrustZone store function */
    LDMFD   sp!, {r0-r1, lr}            /* pop registers */

    /* check break from TrustZone */
    MOV     r2, lr                      /* r2 = lr */
    TST     r2, #0x40                   /* if EXC_RETURN[6] is 1, TrustZone stack was used */
    BEQ     contex_ns_store             /* if r2 & 0x40 == 0, goto contex_ns_store */

    /* push current_context, lr, PSPLIM and CONTROL to the thread stack */
    MRS     r3, psplim                  /* r3 = psplim */
    MRS     r4, control                 /* r4 = control */
    MRS     r5, psp                     /* r5 = psp */
    STMFD   r5!, {r1-r4}                /* push to thread stack */

    /* update from thread stack pointer */
    LDR     r0, [r0]                    /* r0 = rt_interrupt_from_thread */
    STR     r5, [r0]                    /* *r0 = r5 */
    B       switch_to_thread            /* goto switch_to_thread */

contex_ns_store:

    MRS     r1, psp                     /* get from thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10                   /* if(!EXC_RETURN[4]) */
    VSTMDBEQ r1!, {d8 - d15}            /* push FPU register s16~s31 */
#endif
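    /* EXC_RETURN bit 4 is clear when the exception stacked an extended (FPU)
     * frame, so the callee-saved s16-s31 (d8-d15) are saved above as well. */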

    STMFD   r1!, {r4 - r11}             /* push r4 - r11 register */

    LDR     r2, =rt_trustzone_current_context   /* r2 = &rt_trustzone_current_context */
    LDR     r2, [r2]                    /* r2 = *r2 */
    MOV     r3, lr                      /* r3 = lr */
    MRS     r4, psplim                  /* r4 = psplim */
    MRS     r5, control                 /* r5 = control */
    STMFD   r1!, {r2-r5}                /* push to thread stack */

    LDR     r0, [r0]
    STR     r1, [r0]                    /* update from thread stack pointer */

switch_to_thread:
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]                    /* load thread stack pointer */

    /* update current TrustZone context */
    LDMFD   r1!, {r2-r5}                /* pop thread stack */
    MSR     psplim, r4                  /* psplim = r4 */
    MSR     control, r5                 /* control = r5 */
    MOV     lr, r3                      /* lr = r3 */
    LDR     r6, =rt_trustzone_current_context   /* r6 = &rt_trustzone_current_context */
    STR     r2, [r6]                    /* *r6 = r2 */
    MOV     r0, r2                      /* r0 = r2 */

    /* whether a TrustZone thread context exists */
    CBZ     r0, contex_ns_load          /* if r0 == 0, goto contex_ns_load */
    PUSH    {r1, r3}                    /* save thread stack pointer and exc_return */
    BL      rt_trustzone_context_load   /* call TrustZone load function */
    POP     {r1, r3}                    /* restore thread stack pointer and exc_return */
    MOV     lr, r3                      /* lr = r3 */
    TST     r3, #0x40                   /* if EXC_RETURN[6] is 1, TrustZone stack was used */
    BEQ     contex_ns_load              /* if r3 & 0x40 == 0, goto contex_ns_load */
    B       pendsv_exit

contex_ns_load:
    LDMFD   r1!, {r4 - r11}             /* pop r4 - r11 register */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10                   /* if(!EXC_RETURN[4]) */
    VLDMIAEQ r1!, {d8 - d15}            /* pop FPU register s16~s31 */
#endif

pendsv_exit:
    MSR     psp, r1                     /* update stack pointer */
    /* restore interrupt */
    POP     {r2}
    MSR     PRIMASK, r2

    BX      lr

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* CLEAR CONTROL.FPCA */
    MRS     r2, CONTROL                 /* read */
    BIC     r2, #0x04                   /* modify */
    MSR     CONTROL, r2                 /* write-back */
#endif

    /* set from thread to 0 */
    LDR     r1, =rt_interrupt_from_thread
    MOV     r0, #0x0
    STR     r0, [r1]

    /* set interrupt flag to 1 */
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    /* set the PendSV exception priority */
    LDR     r0, =NVIC_SYSPRI2
    LDR     r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0, #0x00]             /* read */
    ORR     r1, r1, r2                  /* modify */
    STR     r1, [r0]                    /* write-back */
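    /* C sketch of the read-modify-write above (address per the .equ table):
     *   *(volatile rt_uint32_t *)0xE000ED20 |= 0x00FF0000;
     * i.e. the PendSV priority byte of SHPR3 becomes 0xFF, the lowest
     * priority, so PendSV is only taken once no other exception is active. */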

    LDR     r0, =NVIC_INT_CTRL          /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    /* restore MSP */
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]
    NOP
    MSR     msp, r0
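    /* The two loads above read entry 0 of the vector table addressed by
     * SCB->VTOR, which holds the initial main stack pointer. C sketch:
     *   msp = *(rt_uint32_t *)(*(volatile rt_uint32_t *)0xE000ED08); */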

    /* enable interrupts at processor level */
    CPSIE   F
    CPSIE   I

    /* never reach here! */

/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX      lr
    NOP

.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
    /* get current context */
    MRS     r0, msp                     /* get fault context from handler. */
    TST     lr, #0x04                   /* if(!EXC_RETURN[2]) */
    BEQ     get_sp_done
    MRS     r0, psp                     /* get fault context from thread. */
get_sp_done:

    STMFD   r0!, {r4 - r11}             /* push r4 - r11 register */

    LDR     r2, =rt_trustzone_current_context   /* r2 = &rt_trustzone_current_context */
    LDR     r2, [r2]                    /* r2 = *r2 */
    MOV     r3, lr                      /* r3 = lr */
    MRS     r4, psplim                  /* r4 = psplim */
    MRS     r5, control                 /* r5 = control */
    STMFD   r0!, {r2-r5}                /* push to thread stack */

    STMFD   r0!, {lr}                   /* push exc_return register */

    TST     lr, #0x04                   /* if(!EXC_RETURN[2]) */
    BEQ     update_msp
    MSR     psp, r0                     /* update stack pointer to PSP. */
    B       update_done
update_msp:
    MSR     msp, r0                     /* update stack pointer to MSP. */
update_done:

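    /* r0 now points to the saved fault frame laid out above (exc_return,
     * tz_context, lr, psplim, control, r4 - r11, then the hardware-stacked
     * r0-r3, r12, lr, pc, xpsr) and is passed as the first argument to
     * rt_hw_hard_fault_exception(). */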
    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {LR}

    ORR     lr, lr, #0x04
    BX      lr