|
| 1 | +/* |
| 2 | + * SPDX-FileCopyrightText: 2026 Espressif Systems (Shanghai) CO LTD |
| 3 | + * |
| 4 | + * SPDX-License-Identifier: Apache-2.0 OR MIT |
| 5 | + */ |
| 6 | + |
| 7 | +#include <stddef.h> |
| 8 | +#include <string.h> |
| 9 | + |
| 10 | +#include <esp-stub-lib/bit_utils.h> |
| 11 | +#include <esp-stub-lib/cache.h> |
| 12 | +#include <esp-stub-lib/flash.h> |
| 13 | +#include <esp-stub-lib/log.h> |
| 14 | +#include <esp-stub-lib/security.h> |
| 15 | + |
| 16 | +#include <target/mmu.h> |
| 17 | + |
#define STUB_MMAP_MAX_PAGES 8 // TODO: check if 8 pages is enough

/* Bookkeeping for the single active flash mapping created by
 * stub_lib_flash_mmap() and torn down by stub_lib_flash_munmap().
 * page_count == 0 means "no mapping is active". Not thread-safe:
 * assumes the stub runs single-threaded (one mapping at a time). */
static struct {
    uint32_t vaddr_base;                         /* virtual address of the first mapped page */
    uint32_t entry_start;                        /* index of the first MMU entry used for the mapping */
    uint32_t page_count;                         /* number of pages currently mapped (0 = none) */
    uint32_t saved_entries[STUB_MMAP_MAX_PAGES]; /* original MMU entries, restored by munmap */
} s_mmap_state;
| 26 | + |
| 27 | +int stub_lib_flash_mmap(uint32_t flash_paddr, uint32_t size, const void **out_vaddr) |
| 28 | +{ |
| 29 | + uint32_t aligned = ALIGN_DOWN(flash_paddr, STUB_MMU_PAGE_SIZE); |
| 30 | + uint32_t offset = flash_paddr - aligned; |
| 31 | + uint32_t map_size = size + offset; |
| 32 | + uint32_t page_count = (map_size + STUB_MMU_PAGE_SIZE - 1) >> STUB_MMU_PAGE_SHIFT; |
| 33 | + |
| 34 | + STUB_LOGD("aligned: %x, offset: %x, map_size: %x, page_count: %d\n", aligned, offset, map_size, page_count); |
| 35 | + |
| 36 | + if (page_count == 0 || page_count > STUB_MMAP_MAX_PAGES) { |
| 37 | + STUB_LOGE("invalid page_count: %d\n", page_count); |
| 38 | + return -1; |
| 39 | + } |
| 40 | + |
| 41 | + uint32_t drom_start = stub_target_mmu_get_drom_entry_start(); |
| 42 | + uint32_t drom_end = stub_target_mmu_get_drom_entry_end(); |
| 43 | + uint32_t drom_count = drom_end - drom_start; |
| 44 | + if (page_count > drom_count) { |
| 45 | + STUB_LOGE("invalid page_count: %d, drom_count: %d\n", page_count, drom_count); |
| 46 | + return -1; |
| 47 | + } |
| 48 | + |
| 49 | + /* Use the last N entries of the DROM region (least likely to conflict) */ |
| 50 | + uint32_t entry_start = drom_end - page_count; |
| 51 | + uint32_t drom_vaddr = stub_target_mmu_get_drom_vaddr(); |
| 52 | + uint32_t vaddr_base = drom_vaddr + (entry_start - drom_start) * STUB_MMU_PAGE_SIZE; |
| 53 | + |
| 54 | + STUB_LOGD("drom_start: %d, drom_end: %d, entry_start: %d, vaddr_base: %x\n", |
| 55 | + drom_start, |
| 56 | + drom_end, |
| 57 | + entry_start, |
| 58 | + vaddr_base); |
| 59 | + |
| 60 | + uint32_t autoload = stub_lib_cache_suspend(); |
| 61 | + |
| 62 | + /* Save existing MMU entries */ |
| 63 | + for (uint32_t i = 0; i < page_count; i++) |
| 64 | + s_mmap_state.saved_entries[i] = stub_target_mmu_read_entry(entry_start + i); |
| 65 | + |
| 66 | + /* Write new flash mappings */ |
| 67 | + uint32_t flash_page = aligned >> STUB_MMU_PAGE_SHIFT; |
| 68 | + for (uint32_t i = 0; i < page_count; i++) |
| 69 | + stub_target_mmu_write_entry(entry_start + i, flash_page + i); |
| 70 | + |
| 71 | + s_mmap_state.vaddr_base = vaddr_base; |
| 72 | + s_mmap_state.entry_start = entry_start; |
| 73 | + s_mmap_state.page_count = page_count; |
| 74 | + |
| 75 | + stub_lib_cache_resume(autoload); |
| 76 | + |
| 77 | + /* Invalidate cache for the mapped region */ |
| 78 | + uint32_t caps = stub_lib_cache_get_caps(); |
| 79 | + if (caps & STUB_CACHE_CAP_HAS_INVALIDATE_ADDR) |
| 80 | + stub_lib_cache_invalidate_addr(vaddr_base, page_count * STUB_MMU_PAGE_SIZE); |
| 81 | + else |
| 82 | + stub_lib_cache_invalidate_all(); |
| 83 | + |
| 84 | + *out_vaddr = (const void *)(vaddr_base + offset); |
| 85 | + |
| 86 | + STUB_LOGD("mmap: 0x%x is mapped to 0x%x\n", flash_paddr, *out_vaddr); |
| 87 | + |
| 88 | + return 0; |
| 89 | +} |
| 90 | + |
| 91 | +void stub_lib_flash_munmap(void) |
| 92 | +{ |
| 93 | + if (s_mmap_state.page_count == 0) |
| 94 | + return; |
| 95 | + |
| 96 | + uint32_t autoload = stub_lib_cache_suspend(); |
| 97 | + |
| 98 | + /* Restore saved MMU entries */ |
| 99 | + for (uint32_t i = 0; i < s_mmap_state.page_count; i++) |
| 100 | + stub_target_mmu_restore_entry(s_mmap_state.entry_start + i, s_mmap_state.saved_entries[i]); |
| 101 | + |
| 102 | + stub_lib_cache_resume(autoload); |
| 103 | + |
| 104 | + /* Invalidate stale cache lines */ |
| 105 | + uint32_t caps = stub_lib_cache_get_caps(); |
| 106 | + if (caps & STUB_CACHE_CAP_HAS_INVALIDATE_ADDR) |
| 107 | + stub_lib_cache_invalidate_addr(s_mmap_state.vaddr_base, s_mmap_state.page_count * STUB_MMU_PAGE_SIZE); |
| 108 | + else |
| 109 | + stub_lib_cache_invalidate_all(); |
| 110 | + |
| 111 | + s_mmap_state.page_count = 0; |
| 112 | +} |
| 113 | + |
/**
 * Read `size` bytes of flash at `addr` into `buffer`.
 *
 * Plaintext flash is read directly via the SPI flash driver. Encrypted flash
 * must be read through the cache (which transparently decrypts), so it is
 * temporarily mapped with stub_lib_flash_mmap() and copied out.
 *
 * @return 0 on success, or the negative/nonzero mmap or read error code.
 *
 * Fix: the encryption check had been stubbed out with `if (0)` (debug
 * leftover), forcing every read — including plaintext flash — through the
 * slower mmap path. The intended condition is restored.
 */
int stub_lib_flash_read(uint32_t addr, void *buffer, uint32_t size)
{
    if (!stub_lib_security_flash_is_encrypted())
        return stub_lib_flash_read_buff(addr, buffer, size);

    /* Encrypted flash: read through cache via MMU mapping */
    const void *vaddr;
    int rc = stub_lib_flash_mmap(addr, size, &vaddr);
    if (rc != 0) {
        STUB_LOGE("mmap failed (%d) @ %x\n", rc, addr);
        return rc;
    }

    memcpy(buffer, vaddr, size);
    stub_lib_flash_munmap();

    return 0;
}
0 commit comments