|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 2 | +/* |
| 3 | + * Fixmap manipulation code |
| 4 | + */ |
| 5 | + |
| 6 | +#include <linux/bug.h> |
| 7 | +#include <linux/init.h> |
| 8 | +#include <linux/kernel.h> |
| 9 | +#include <linux/libfdt.h> |
| 10 | +#include <linux/memory.h> |
| 11 | +#include <linux/mm.h> |
| 12 | +#include <linux/sizes.h> |
| 13 | + |
| 14 | +#include <asm/fixmap.h> |
| 15 | +#include <asm/kernel-pgtable.h> |
| 16 | +#include <asm/pgalloc.h> |
| 17 | +#include <asm/tlbflush.h> |
| 18 | + |
/* Number of PTE-level tables needed to span the whole fixmap region. */
#define NR_BM_PTE_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
/* Number of PMD-level tables needed to span the whole fixmap region. */
#define NR_BM_PMD_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

/* The whole fixmap must fit within a single PMD table (one PUD entry). */
static_assert(NR_BM_PMD_TABLES == 1);

/* Index of the table at level 'shift' that covers 'addr' within the fixmap. */
#define __BM_TABLE_IDX(addr, shift) \
	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr)	__BM_TABLE_IDX(addr, PMD_SHIFT)

/*
 * Statically allocated page tables backing the fixmap. The PMD/PUD tables
 * are only referenced on configurations with enough page-table levels,
 * hence __maybe_unused.
 */
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
| 34 | + |
| 35 | +static inline pte_t *fixmap_pte(unsigned long addr) |
| 36 | +{ |
| 37 | + return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)]; |
| 38 | +} |
| 39 | + |
| 40 | +static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr) |
| 41 | +{ |
| 42 | + pmd_t pmd = READ_ONCE(*pmdp); |
| 43 | + pte_t *ptep; |
| 44 | + |
| 45 | + if (pmd_none(pmd)) { |
| 46 | + ptep = bm_pte[BM_PTE_TABLE_IDX(addr)]; |
| 47 | + __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE); |
| 48 | + } |
| 49 | +} |
| 50 | + |
/*
 * Walk the PMD entries spanning [addr, end), wiring the static bm_pte
 * tables into any empty slots via early_fixmap_init_pte().
 */
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
					 unsigned long end)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	/* Hang the static PMD table off the PUD entry if it is empty. */
	if (pud_none(pud))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);

	pmdp = pmd_offset_kimg(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		early_fixmap_init_pte(pmdp, addr);
	} while (pmdp++, addr = next, addr != end);
}
| 67 | + |
| 68 | + |
/*
 * Populate the PUD level for the fixmap region [addr, end), then descend
 * to the PMD level via early_fixmap_init_pmd().
 */
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
					 unsigned long end)
{
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
	}

	/* Hang the static PUD table off the P4D entry if it is empty. */
	if (p4d_none(p4d))
		__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);

	pudp = pud_offset_kimg(p4dp, addr);
	early_fixmap_init_pmd(pudp, addr, end);
}
| 91 | + |
| 92 | +/* |
| 93 | + * The p*d_populate functions call virt_to_phys implicitly so they can't be used |
| 94 | + * directly on kernel symbols (bm_p*d). This function is called too early to use |
| 95 | + * lm_alias so __p*d_populate functions must be used to populate with the |
| 96 | + * physical address from __pa_symbol. |
| 97 | + */ |
| 98 | +void __init early_fixmap_init(void) |
| 99 | +{ |
| 100 | + unsigned long addr = FIXADDR_TOT_START; |
| 101 | + unsigned long end = FIXADDR_TOP; |
| 102 | + |
| 103 | + pgd_t *pgdp = pgd_offset_k(addr); |
| 104 | + p4d_t *p4dp = p4d_offset(pgdp, addr); |
| 105 | + |
| 106 | + early_fixmap_init_pud(p4dp, addr, end); |
| 107 | +} |
| 108 | + |
| 109 | +/* |
| 110 | + * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we |
| 111 | + * ever need to use IPIs for TLB broadcasting, then we're in trouble here. |
| 112 | + */ |
| 113 | +void __set_fixmap(enum fixed_addresses idx, |
| 114 | + phys_addr_t phys, pgprot_t flags) |
| 115 | +{ |
| 116 | + unsigned long addr = __fix_to_virt(idx); |
| 117 | + pte_t *ptep; |
| 118 | + |
| 119 | + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); |
| 120 | + |
| 121 | + ptep = fixmap_pte(addr); |
| 122 | + |
| 123 | + if (pgprot_val(flags)) { |
| 124 | + set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); |
| 125 | + } else { |
| 126 | + pte_clear(&init_mm, addr, ptep); |
| 127 | + flush_tlb_kernel_range(addr, addr+PAGE_SIZE); |
| 128 | + } |
| 129 | +} |
| 130 | + |
/*
 * Map the flattened device tree at @dt_phys through the FIX_FDT fixmap slot
 * and return its virtual address, or NULL if the address is unset/misaligned,
 * the blob has a bad magic, or it exceeds MAX_FDT_SIZE. On success *size is
 * set to the FDT's total size.
 */
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	phys_addr_t dt_phys_base;
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/* The blob need not be page-aligned; map from the enclosing page. */
	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
	offset = dt_phys % PAGE_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	/* Extend the mapping if the blob spills past the first page. */
	if (offset + *size > PAGE_SIZE) {
		create_mapping_noalloc(dt_phys_base, dt_virt_base,
				       offset + *size, prot);
	}

	return dt_virt;
}
| 170 | + |
/*
 * Copy the fixmap region into a new pgdir.
 */
void __init fixmap_copy(pgd_t *pgdir)
{
	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		/* Neither case applies: the fixmap layout is inconsistent. */
		BUG();
	}
}