@@ -33,6 +33,8 @@
 // wrapper around everything in this file
 #if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
 
+#include "py/mpstate.h"
+#include "py/persistentcode.h"
 #include "py/mphal.h"
 #include "py/asmthumb.h"
 
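Note: the two new headers are plausibly what supplies the identifiers used by the entry prelude below — py/mpstate.h declares the mp_dynamic_compiler state and py/persistentcode.h defines the MP_NATIVE_ARCH_ARMV6 constant.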
@@ -118,6 +120,21 @@ STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) { |
 void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
     assert(num_locals >= 0);
 
+    // If this Thumb machine code is run from ARM state then add a prelude
+    // to switch to Thumb state for the duration of the function.
+    #if MICROPY_DYNAMIC_COMPILER || MICROPY_EMIT_ARM || (defined(__arm__) && !defined(__thumb2__))
+    #if MICROPY_DYNAMIC_COMPILER
+    if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_ARMV6)
+    #endif
+    {
+        asm_thumb_op32(as, 0x4010, 0xe92d); // push {r4, lr}
+        asm_thumb_op32(as, 0xe009, 0xe28f); // add lr, pc, 8 + 1
+        asm_thumb_op32(as, 0xff3e, 0xe12f); // blx lr
+        asm_thumb_op32(as, 0x4010, 0xe8bd); // pop {r4, lr}
+        asm_thumb_op32(as, 0xff1e, 0xe12f); // bx lr
+    }
+    #endif
+
     // work out what to push and how many extra spaces to reserve on stack
     // so that we have enough for all locals and it's aligned an 8-byte boundary
     // we push extra regs (r1, r2, r3) to help do the stack adjustment
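The five words in this prelude are 32-bit ARM-state instructions: push {r4, lr} saves state (and keeps the stack 8-byte aligned), add lr, pc, 8 + 1 points lr at the Thumb body just past the prelude with bit 0 set to select Thumb, and blx lr enters the body; since blx rewrites lr with an ARM-state return address, the body's final bx lr comes back to the pop, and the closing bx lr returns to the ARM caller. Because asm_thumb_op32 writes two 16-bit halfwords, each ARM opcode is passed low halfword first (little-endian memory order). A minimal sketch of that split, with emit16 as a hypothetical stand-in for the emitter's halfword writer:

    #include <stdint.h>

    // Sketch only: one 32-bit ARM opcode becomes two 16-bit writes in
    // little-endian memory order, low halfword first.
    static void emit_arm_op(void (*emit16)(uint16_t), uint32_t op) {
        emit16((uint16_t)(op & 0xffff)); // 0xe92d4010 -> 0x4010 first ...
        emit16((uint16_t)(op >> 16));    // ... then 0xe92d, matching asm_thumb_op32(as, 0x4010, 0xe92d)
    }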
@@ -225,10 +242,12 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) { |
 }
 
 // if loading lo half with movw, the i16 value will be zero extended into the r32 register!
-void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
+size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
     assert(reg_dest < ASM_THUMB_REG_R15);
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
     // mov[wt] reg_dest, #i16_src
     asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
+    return loc;
 }
 
 #define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
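The offset is captured with mp_asm_base_get_code_pos before the instruction is written, so the returned value addresses the start of the 4-byte mov encoding. That lets a caller record where an immediate lives so it can be resolved later — the new py/persistentcode.h include suggests this serves linking of saved native code, though the actual caller is outside this file. A hedged sketch of the pattern (emit_patchable_movw is illustrative, not MicroPython API):

    #include "py/asmthumb.h"

    // Sketch (not the file's actual caller): emit a movw with a placeholder
    // immediate and keep its code offset so a later pass can rewrite the
    // instruction once the real 16-bit value is known.
    static size_t emit_patchable_movw(asm_thumb_t *as, uint reg_dest) {
        return asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, 0);
    }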
@@ -271,12 +290,16 @@ bool asm_thumb_bl_label(asm_thumb_t *as, uint label) { |
     return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel);
 }
 
-void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
+size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
     // movw, movt does it in 8 bytes
     // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
 
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+
     asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
     asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
+
+    return loc;
 }
 
 void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
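A full 32-bit constant is loaded as a movw/movt pair: movw zero-extends the low 16 bits into the register, then movt overwrites the top 16 bits while leaving the low half intact, so the offset captured before the movw identifies the whole 8-byte pair for later patching. A sketch of how the constant divides across the two instructions:

    #include <stdint.h>

    // Sketch: splitting a 32-bit constant across the movw/movt pair.
    static void split_mov_i32(uint32_t i32, uint16_t *movw_imm, uint16_t *movt_imm) {
        *movw_imm = (uint16_t)(i32 & 0xffff); // movw reg, #lo (zero-extends into r32)
        *movt_imm = (uint16_t)(i32 >> 16);    // movt reg, #hi (low half preserved)
    }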