|
| 1 | +/* |
| 2 | + * Copyright (c) 2006-2018, RT-Thread Development Team |
| 3 | + * |
| 4 | + * SPDX-License-Identifier: Apache-2.0 |
| 5 | + * |
| 6 | + * Change Logs: |
| 7 | + * Date Author Notes |
| 8 | + * 2009-10-11 Bernard first version |
| 9 | + * 2012-01-01 aozima support context switch load/store FPU register. |
| 10 | + * 2013-06-18 aozima add restore MSP feature. |
| 11 | + * 2013-06-23 aozima support lazy stack optimized. |
| 12 | + * 2018-07-24     aozima       enhance hard fault exception handler. |
| 13 | + */ |
| 14 | + |
| 15 | + .cpu cortex-m7 |
| 16 | + .syntax unified |
| 17 | + .thumb |
| 18 | + .text |
| 19 | + |
| 20 | + .equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */ |
| 21 | + .equ ICSR, 0xE000ED04 /* interrupt control state register */ |
| 22 | + .equ PENDSVSET_BIT, 0x10000000 /* value to trigger PendSV exception */ |
| 23 | + |
| 24 | + .equ SHPR3, 0xE000ED20 /* system priority register (3) */ |
| 25 | + .equ PENDSV_PRI_LOWEST, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */ |
| 26 | + |
| 27 | +/* |
| 28 | + * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to); |
| 29 | + * R0 --> from |
| 30 | + * R1 --> to |
| 31 | + */ |
| 32 | + .global rt_hw_context_switch_interrupt |
| 33 | + .type rt_hw_context_switch_interrupt, %function |
| 34 | + .global rt_hw_context_switch |
| 35 | + .type rt_hw_context_switch, %function |
| 36 | +rt_hw_context_switch_interrupt: |
| 37 | +rt_hw_context_switch: |
| 38 | + /* set rt_thread_switch_interrupt_flag to 1 */ |
| 39 | + LDR R2, =rt_thread_switch_interrupt_flag |
| 40 | + LDR R3, [R2] |
| 41 | + CMP R3, #1 |
| 42 | + BEQ _reswitch |
| 43 | + MOV R3, #1 |
| 44 | + STR R3, [R2] |
| 45 | + |
| 46 | + LDR R2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */ |
| 47 | + STR R0, [R2] |
| 48 | + |
| 49 | +_reswitch: |
| 50 | + LDR R2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */ |
| 51 | + STR R1, [R2] |
| 52 | + |
| 53 | + LDR R0, =ICSR /* trigger the PendSV exception (causes context switch) */ |
| 54 | + LDR R1, =PENDSVSET_BIT |
| 55 | + STR R1, [R0] |
| 56 | + BX LR |
| 57 | + |
| 58 | +/* R0 --> switch from thread stack |
| 59 | + * R1 --> switch to thread stack |
| 60 | + * psr, pc, LR, R12, R3, R2, R1, R0 are pushed into [from] stack |
| 61 | + */ |
| 62 | + .global PendSV_Handler |
| 63 | + .type PendSV_Handler, %function |
| 64 | +PendSV_Handler: |
| 65 | + /* disable interrupt to protect context switch */ |
| 66 | + MRS R2, PRIMASK |
| 67 | + CPSID I |
| 68 | + |
| 69 | + /* get rt_thread_switch_interrupt_flag */ |
| 70 | + LDR R0, =rt_thread_switch_interrupt_flag |
| 71 | + LDR R1, [R0] |
| 72 | + CBZ R1, pendsv_exit /* pendsv already handled */ |
| 73 | + |
| 74 | + /* clear rt_thread_switch_interrupt_flag to 0 */ |
| 75 | + MOV R1, #0 |
| 76 | + STR R1, [R0] |
| 77 | + |
| 78 | + LDR R0, =rt_interrupt_from_thread |
| 79 | + LDR R1, [R0] |
| 80 | + CBZ R1, switch_to_thread /* skip register save at the first time */ |
| 81 | + |
| 82 | + MRS R1, PSP /* get from thread stack pointer */ |
| 83 | + |
| 84 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 85 | + TST lr, #0x10 /* if(!EXC_RETURN[4]) */ |
| 86 | + |
| 87 | + IT EQ |
| 88 | + VSTMDBEQ r1!, {d8 - d15} /* push FPU register s16~s31 */ |
| 89 | +#endif |
| 90 | + |
| 91 | + STMFD R1!, {R4 - R11} /* push R4 - R11 register */ |
| 92 | + |
| 93 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 94 | + MOV r4, #0x00 /* flag = 0 */ |
| 95 | + |
| 96 | + TST lr, #0x10 /* if(!EXC_RETURN[4]) */ |
| 97 | + |
| 98 | + IT EQ |
| 99 | + MOVEQ r4, #0x01 /* flag = 1 */ |
| 100 | + |
| 101 | + STMFD r1!, {r4} /* push flag */ |
| 102 | +#endif |
| 103 | + |
| 104 | + LDR R0, [R0] |
| 105 | + STR R1, [R0] /* update from thread stack pointer */ |
| 106 | + |
| 107 | +switch_to_thread: |
| 108 | + LDR R1, =rt_interrupt_to_thread |
| 109 | + LDR R1, [R1] |
| 110 | + LDR R1, [R1] /* load thread stack pointer */ |
| 111 | + |
| 112 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 113 | + LDMFD r1!, {r3} /* pop flag */ |
| 114 | +#endif |
| 115 | + |
| 116 | + LDMFD R1!, {R4 - R11} /* pop R4 - R11 register */ |
| 117 | + |
| 118 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 119 | + CMP r3, #0 /* if(flag_r3 != 0) */ |
| 120 | + |
| 121 | + IT NE |
| 122 | + VLDMIANE r1!, {d8 - d15} /* pop FPU register s16~s31 */ |
| 123 | +#endif |
| 124 | + |
| 125 | + MSR PSP, R1 /* update stack pointer */ |
| 126 | + |
| 127 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 128 | + ORR lr, lr, #0x10 /* lr |= (1 << 4), clean FPCA. */ |
| 129 | + CMP r3, #0 /* if(flag_r3 != 0) */ |
| 130 | + |
| 131 | + IT NE |
| 132 | + BICNE lr, lr, #0x10 /* lr &= ~(1 << 4), set FPCA. */ |
| 133 | +#endif |
| 134 | + |
| 135 | +pendsv_exit: |
| 136 | + /* restore interrupt */ |
| 137 | + MSR PRIMASK, R2 |
| 138 | + |
| 139 | + ORR LR, LR, #0x04 |
| 140 | + BX LR |
| 141 | + |
| 142 | +/* |
| 143 | + * void rt_hw_context_switch_to(rt_uint32 to); |
| 144 | + * R0 --> to |
| 145 | + */ |
| 146 | + .global rt_hw_context_switch_to |
| 147 | + .type rt_hw_context_switch_to, %function |
| 148 | +rt_hw_context_switch_to: |
| 149 | + LDR R1, =rt_interrupt_to_thread |
| 150 | + STR R0, [R1] |
| 151 | + |
| 152 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 153 | + /* CLEAR CONTROL.FPCA */ |
| 154 | + MRS r2, CONTROL /* read */ |
| 155 | + BIC r2, #0x04 /* modify */ |
| 156 | + MSR CONTROL, r2 /* write-back */ |
| 157 | +#endif |
| 158 | + |
| 159 | + /* set from thread to 0 */ |
| 160 | + LDR R1, =rt_interrupt_from_thread |
| 161 | + MOV R0, #0 |
| 162 | + STR R0, [R1] |
| 163 | + |
| 164 | + /* set interrupt flag to 1 */ |
| 165 | + LDR R1, =rt_thread_switch_interrupt_flag |
| 166 | + MOV R0, #1 |
| 167 | + STR R0, [R1] |
| 168 | + |
| 169 | + /* set the PendSV and SysTick exception priority */ |
| 170 | + LDR R0, =SHPR3 |
| 171 | + LDR R1, =PENDSV_PRI_LOWEST |
| 172 | + LDR.W R2, [R0,#0] /* read */ |
| 173 | + ORR R1, R1, R2 /* modify */ |
| 174 | + STR R1, [R0] /* write-back */ |
| 175 | + |
| 176 | + LDR R0, =ICSR /* trigger the PendSV exception (causes context switch) */ |
| 177 | + LDR R1, =PENDSVSET_BIT |
| 178 | + STR R1, [R0] |
| 179 | + |
| 180 | + /* restore MSP */ |
| 181 | + LDR r0, =SCB_VTOR |
| 182 | + LDR r0, [r0] |
| 183 | + LDR r0, [r0] |
| 184 | + NOP |
| 185 | + MSR msp, r0 |
| 186 | + |
| 187 | + /* enable interrupts at processor level */ |
| 188 | + CPSIE F |
| 189 | + CPSIE I |
| 190 | + |
| 191 | + /* never reach here! */ |
| 192 | + |
| 193 | +/* compatible with old version */ |
| 194 | + .global rt_hw_interrupt_thread_switch |
| 195 | + .type rt_hw_interrupt_thread_switch, %function |
| 196 | +rt_hw_interrupt_thread_switch: |
| 197 | + BX LR |
| 198 | + NOP |
| 199 | + |
| 200 | + .global HardFault_Handler |
| 201 | + .type HardFault_Handler, %function |
| 202 | +HardFault_Handler: |
| 203 | + /* get current context */ |
| 204 | + MRS r0, msp /* get fault context from handler. */ |
| 205 | + TST lr, #0x04 /* if(!EXC_RETURN[2]) */ |
| 206 | + BEQ _get_sp_done |
| 207 | + MRS r0, psp /* get fault context from thread. */ |
| 208 | +_get_sp_done: |
| 209 | + STMFD r0!, {r4 - r11} /* push r4 - r11 register */ |
| 210 | +#if defined (__VFP_FP__) && !defined(__SOFTFP__) |
| 211 | + STMFD r0!, {lr} /* push dummy for flag */ |
| 212 | +#endif |
| 213 | + STMFD r0!, {lr} /* push exec_return register */ |
| 214 | + |
| 215 | + TST lr, #0x04 /* if(!EXC_RETURN[2]) */ |
| 216 | + BEQ _update_msp |
| 217 | + MSR psp, r0 /* update stack pointer to PSP. */ |
| 218 | + B _update_done |
| 219 | +_update_msp: |
| 220 | + MSR msp, r0 /* update stack pointer to MSP. */ |
| 221 | +_update_done: |
| 222 | + |
| 223 | + PUSH {LR} |
| 224 | + BL rt_hw_hard_fault_exception |
| 225 | + POP {LR} |
| 226 | + |
| 227 | + ORR LR, LR, #0x04 |
| 228 | + BX LR |
0 commit comments