Skip to content

Commit e8dcb61

Browse files
atishp04 authored and palmer-dabbelt committed
RISC-V: Implement late mapping page table allocation functions
Currently, page table setup is done during setup_vm_final, where fixmap can be used to create the temporary mappings. The physical frame is allocated from memblock_alloc_* functions. However, this won't work if a page table mapping needs to be created for a different mm context (i.e. efi mm) at a later point in time. Use generic kernel page allocation functions & macros for any mapping after setup_vm_final. Signed-off-by: Atish Patra <[email protected]> Reviewed-by: Anup Patel <[email protected]> Acked-by: Mike Rapoport <[email protected]> Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 6262f66 commit e8dcb61

File tree

1 file changed

+99
-31
lines changed

1 file changed

+99
-31
lines changed

arch/riscv/mm/init.c

Lines changed: 99 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,15 @@ extern char _start[];
3232
void *dtb_early_va __initdata;
3333
uintptr_t dtb_early_pa __initdata;
3434

35+
struct pt_alloc_ops {
36+
pte_t *(*get_pte_virt)(phys_addr_t pa);
37+
phys_addr_t (*alloc_pte)(uintptr_t va);
38+
#ifndef __PAGETABLE_PMD_FOLDED
39+
pmd_t *(*get_pmd_virt)(phys_addr_t pa);
40+
phys_addr_t (*alloc_pmd)(uintptr_t va);
41+
#endif
42+
};
43+
3544
static void __init zone_sizes_init(void)
3645
{
3746
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -203,6 +212,8 @@ void __init setup_bootmem(void)
203212
}
204213

205214
#ifdef CONFIG_MMU
215+
static struct pt_alloc_ops pt_ops;
216+
206217
unsigned long va_pa_offset;
207218
EXPORT_SYMBOL(va_pa_offset);
208219
unsigned long pfn_base;
@@ -211,7 +222,6 @@ EXPORT_SYMBOL(pfn_base);
211222
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
212223
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
213224
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
214-
static bool mmu_enabled;
215225

216226
#define MAX_EARLY_MAPPING_SIZE SZ_128M
217227

@@ -234,27 +244,46 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
234244
}
235245
}
236246

237-
static pte_t *__init get_pte_virt(phys_addr_t pa)
247+
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
238248
{
239-
if (mmu_enabled) {
240-
clear_fixmap(FIX_PTE);
241-
return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
242-
} else {
243-
return (pte_t *)((uintptr_t)pa);
244-
}
249+
return (pte_t *)((uintptr_t)pa);
245250
}
246251

247-
static phys_addr_t __init alloc_pte(uintptr_t va)
252+
static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
253+
{
254+
clear_fixmap(FIX_PTE);
255+
return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
256+
}
257+
258+
static inline pte_t *get_pte_virt_late(phys_addr_t pa)
259+
{
260+
return (pte_t *) __va(pa);
261+
}
262+
263+
static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
248264
{
249265
/*
250266
* We only create PMD or PGD early mappings so we
251267
* should never reach here with MMU disabled.
252268
*/
253-
BUG_ON(!mmu_enabled);
269+
BUG();
270+
}
254271

272+
static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
273+
{
255274
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
256275
}
257276

277+
static phys_addr_t alloc_pte_late(uintptr_t va)
278+
{
279+
unsigned long vaddr;
280+
281+
vaddr = __get_free_page(GFP_KERNEL);
282+
if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
283+
BUG();
284+
return __pa(vaddr);
285+
}
286+
258287
static void __init create_pte_mapping(pte_t *ptep,
259288
uintptr_t va, phys_addr_t pa,
260289
phys_addr_t sz, pgprot_t prot)
@@ -279,28 +308,46 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
279308
#endif
280309
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
281310

282-
static pmd_t *__init get_pmd_virt(phys_addr_t pa)
311+
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
283312
{
284-
if (mmu_enabled) {
285-
clear_fixmap(FIX_PMD);
286-
return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
287-
} else {
288-
return (pmd_t *)((uintptr_t)pa);
289-
}
313+
/* Before MMU is enabled */
314+
return (pmd_t *)((uintptr_t)pa);
290315
}
291316

292-
static phys_addr_t __init alloc_pmd(uintptr_t va)
317+
static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
293318
{
294-
uintptr_t pmd_num;
319+
clear_fixmap(FIX_PMD);
320+
return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
321+
}
322+
323+
static pmd_t *get_pmd_virt_late(phys_addr_t pa)
324+
{
325+
return (pmd_t *) __va(pa);
326+
}
295327

296-
if (mmu_enabled)
297-
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
328+
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
329+
{
330+
uintptr_t pmd_num;
298331

299332
pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
300333
BUG_ON(pmd_num >= NUM_EARLY_PMDS);
301334
return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
302335
}
303336

337+
static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
338+
{
339+
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
340+
}
341+
342+
static phys_addr_t alloc_pmd_late(uintptr_t va)
343+
{
344+
unsigned long vaddr;
345+
346+
vaddr = __get_free_page(GFP_KERNEL);
347+
BUG_ON(!vaddr);
348+
return __pa(vaddr);
349+
}
350+
304351
static void __init create_pmd_mapping(pmd_t *pmdp,
305352
uintptr_t va, phys_addr_t pa,
306353
phys_addr_t sz, pgprot_t prot)
@@ -316,28 +363,28 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
316363
}
317364

318365
if (pmd_none(pmdp[pmd_idx])) {
319-
pte_phys = alloc_pte(va);
366+
pte_phys = pt_ops.alloc_pte(va);
320367
pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
321-
ptep = get_pte_virt(pte_phys);
368+
ptep = pt_ops.get_pte_virt(pte_phys);
322369
memset(ptep, 0, PAGE_SIZE);
323370
} else {
324371
pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
325-
ptep = get_pte_virt(pte_phys);
372+
ptep = pt_ops.get_pte_virt(pte_phys);
326373
}
327374

328375
create_pte_mapping(ptep, va, pa, sz, prot);
329376
}
330377

331378
#define pgd_next_t pmd_t
332-
#define alloc_pgd_next(__va) alloc_pmd(__va)
333-
#define get_pgd_next_virt(__pa) get_pmd_virt(__pa)
379+
#define alloc_pgd_next(__va) pt_ops.alloc_pmd(__va)
380+
#define get_pgd_next_virt(__pa) pt_ops.get_pmd_virt(__pa)
334381
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
335382
create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
336383
#define fixmap_pgd_next fixmap_pmd
337384
#else
338385
#define pgd_next_t pte_t
339-
#define alloc_pgd_next(__va) alloc_pte(__va)
340-
#define get_pgd_next_virt(__pa) get_pte_virt(__pa)
386+
#define alloc_pgd_next(__va) pt_ops.alloc_pte(__va)
387+
#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa)
341388
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
342389
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
343390
#define fixmap_pgd_next fixmap_pte
@@ -421,6 +468,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
421468
BUG_ON((load_pa % map_size) != 0);
422469
BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
423470

471+
pt_ops.alloc_pte = alloc_pte_early;
472+
pt_ops.get_pte_virt = get_pte_virt_early;
473+
#ifndef __PAGETABLE_PMD_FOLDED
474+
pt_ops.alloc_pmd = alloc_pmd_early;
475+
pt_ops.get_pmd_virt = get_pmd_virt_early;
476+
#endif
424477
/* Setup early PGD for fixmap */
425478
create_pgd_mapping(early_pg_dir, FIXADDR_START,
426479
(uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
@@ -497,9 +550,16 @@ static void __init setup_vm_final(void)
497550
phys_addr_t pa, start, end;
498551
struct memblock_region *reg;
499552

500-
/* Set mmu_enabled flag */
501-
mmu_enabled = true;
502-
553+
/**
554+
* MMU is enabled at this point. But page table setup is not complete yet.
555+
* fixmap page table alloc functions should be used at this point
556+
*/
557+
pt_ops.alloc_pte = alloc_pte_fixmap;
558+
pt_ops.get_pte_virt = get_pte_virt_fixmap;
559+
#ifndef __PAGETABLE_PMD_FOLDED
560+
pt_ops.alloc_pmd = alloc_pmd_fixmap;
561+
pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
562+
#endif
503563
/* Setup swapper PGD for fixmap */
504564
create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
505565
__pa_symbol(fixmap_pgd_next),
@@ -533,6 +593,14 @@ static void __init setup_vm_final(void)
533593
/* Move to swapper page table */
534594
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
535595
local_flush_tlb_all();
596+
597+
/* generic page allocation functions must be used to setup page table */
598+
pt_ops.alloc_pte = alloc_pte_late;
599+
pt_ops.get_pte_virt = get_pte_virt_late;
600+
#ifndef __PAGETABLE_PMD_FOLDED
601+
pt_ops.alloc_pmd = alloc_pmd_late;
602+
pt_ops.get_pmd_virt = get_pmd_virt_late;
603+
#endif
536604
}
537605
#else
538606
asmlinkage void __init setup_vm(uintptr_t dtb_pa)

0 commit comments

Comments
 (0)