@@ -32,6 +32,15 @@ extern char _start[];
32
32
void * dtb_early_va __initdata ;
33
33
uintptr_t dtb_early_pa __initdata ;
34
34
35
+ struct pt_alloc_ops {
36
+ pte_t * (* get_pte_virt )(phys_addr_t pa );
37
+ phys_addr_t (* alloc_pte )(uintptr_t va );
38
+ #ifndef __PAGETABLE_PMD_FOLDED
39
+ pmd_t * (* get_pmd_virt )(phys_addr_t pa );
40
+ phys_addr_t (* alloc_pmd )(uintptr_t va );
41
+ #endif
42
+ };
43
+
35
44
static void __init zone_sizes_init (void )
36
45
{
37
46
unsigned long max_zone_pfns [MAX_NR_ZONES ] = { 0 , };
@@ -203,6 +212,8 @@ void __init setup_bootmem(void)
203
212
}
204
213
205
214
#ifdef CONFIG_MMU
/* Currently active page-table allocation ops; updated as boot progresses. */
static struct pt_alloc_ops pt_ops;

/* Offset between the kernel's virtual and physical load addresses. */
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
@@ -211,7 +222,6 @@ EXPORT_SYMBOL(pfn_base);
211
222
pgd_t swapper_pg_dir [PTRS_PER_PGD ] __page_aligned_bss ;
212
223
pgd_t trampoline_pg_dir [PTRS_PER_PGD ] __page_aligned_bss ;
213
224
pte_t fixmap_pte [PTRS_PER_PTE ] __page_aligned_bss ;
214
- static bool mmu_enabled ;
215
225
216
226
#define MAX_EARLY_MAPPING_SIZE SZ_128M
217
227
@@ -234,27 +244,46 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
234
244
}
235
245
}
236
246
237
- static pte_t * __init get_pte_virt (phys_addr_t pa )
247
+ static inline pte_t * __init get_pte_virt_early (phys_addr_t pa )
238
248
{
239
- if (mmu_enabled ) {
240
- clear_fixmap (FIX_PTE );
241
- return (pte_t * )set_fixmap_offset (FIX_PTE , pa );
242
- } else {
243
- return (pte_t * )((uintptr_t )pa );
244
- }
249
+ return (pte_t * )((uintptr_t )pa );
245
250
}
246
251
247
- static phys_addr_t __init alloc_pte (uintptr_t va )
252
+ static inline pte_t * __init get_pte_virt_fixmap (phys_addr_t pa )
253
+ {
254
+ clear_fixmap (FIX_PTE );
255
+ return (pte_t * )set_fixmap_offset (FIX_PTE , pa );
256
+ }
257
+
258
+ static inline pte_t * get_pte_virt_late (phys_addr_t pa )
259
+ {
260
+ return (pte_t * ) __va (pa );
261
+ }
262
+
263
+ static inline phys_addr_t __init alloc_pte_early (uintptr_t va )
248
264
{
249
265
/*
250
266
* We only create PMD or PGD early mappings so we
251
267
* should never reach here with MMU disabled.
252
268
*/
253
- BUG_ON (!mmu_enabled );
269
+ BUG ();
270
+ }
254
271
272
+ static inline phys_addr_t __init alloc_pte_fixmap (uintptr_t va )
273
+ {
255
274
return memblock_phys_alloc (PAGE_SIZE , PAGE_SIZE );
256
275
}
257
276
277
+ static phys_addr_t alloc_pte_late (uintptr_t va )
278
+ {
279
+ unsigned long vaddr ;
280
+
281
+ vaddr = __get_free_page (GFP_KERNEL );
282
+ if (!vaddr || !pgtable_pte_page_ctor (virt_to_page (vaddr )))
283
+ BUG ();
284
+ return __pa (vaddr );
285
+ }
286
+
258
287
static void __init create_pte_mapping (pte_t * ptep ,
259
288
uintptr_t va , phys_addr_t pa ,
260
289
phys_addr_t sz , pgprot_t prot )
@@ -279,28 +308,46 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
279
308
#endif
280
309
/* Statically reserved PMD tables for the early (pre-memblock) mapping. */
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
281
310
282
- static pmd_t * __init get_pmd_virt (phys_addr_t pa )
311
+ static pmd_t * __init get_pmd_virt_early (phys_addr_t pa )
283
312
{
284
- if (mmu_enabled ) {
285
- clear_fixmap (FIX_PMD );
286
- return (pmd_t * )set_fixmap_offset (FIX_PMD , pa );
287
- } else {
288
- return (pmd_t * )((uintptr_t )pa );
289
- }
313
+ /* Before MMU is enabled */
314
+ return (pmd_t * )((uintptr_t )pa );
290
315
}
291
316
292
- static phys_addr_t __init alloc_pmd ( uintptr_t va )
317
+ static pmd_t * __init get_pmd_virt_fixmap ( phys_addr_t pa )
293
318
{
294
- uintptr_t pmd_num ;
319
+ clear_fixmap (FIX_PMD );
320
+ return (pmd_t * )set_fixmap_offset (FIX_PMD , pa );
321
+ }
322
+
323
+ static pmd_t * get_pmd_virt_late (phys_addr_t pa )
324
+ {
325
+ return (pmd_t * ) __va (pa );
326
+ }
295
327
296
- if (mmu_enabled )
297
- return memblock_phys_alloc (PAGE_SIZE , PAGE_SIZE );
328
+ static phys_addr_t __init alloc_pmd_early (uintptr_t va )
329
+ {
330
+ uintptr_t pmd_num ;
298
331
299
332
pmd_num = (va - PAGE_OFFSET ) >> PGDIR_SHIFT ;
300
333
BUG_ON (pmd_num >= NUM_EARLY_PMDS );
301
334
return (uintptr_t )& early_pmd [pmd_num * PTRS_PER_PMD ];
302
335
}
303
336
337
+ static phys_addr_t __init alloc_pmd_fixmap (uintptr_t va )
338
+ {
339
+ return memblock_phys_alloc (PAGE_SIZE , PAGE_SIZE );
340
+ }
341
+
342
+ static phys_addr_t alloc_pmd_late (uintptr_t va )
343
+ {
344
+ unsigned long vaddr ;
345
+
346
+ vaddr = __get_free_page (GFP_KERNEL );
347
+ BUG_ON (!vaddr );
348
+ return __pa (vaddr );
349
+ }
350
+
304
351
static void __init create_pmd_mapping (pmd_t * pmdp ,
305
352
uintptr_t va , phys_addr_t pa ,
306
353
phys_addr_t sz , pgprot_t prot )
@@ -316,28 +363,28 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
316
363
}
317
364
318
365
if (pmd_none (pmdp [pmd_idx ])) {
319
- pte_phys = alloc_pte (va );
366
+ pte_phys = pt_ops . alloc_pte (va );
320
367
pmdp [pmd_idx ] = pfn_pmd (PFN_DOWN (pte_phys ), PAGE_TABLE );
321
- ptep = get_pte_virt (pte_phys );
368
+ ptep = pt_ops . get_pte_virt (pte_phys );
322
369
memset (ptep , 0 , PAGE_SIZE );
323
370
} else {
324
371
pte_phys = PFN_PHYS (_pmd_pfn (pmdp [pmd_idx ]));
325
- ptep = get_pte_virt (pte_phys );
372
+ ptep = pt_ops . get_pte_virt (pte_phys );
326
373
}
327
374
328
375
create_pte_mapping (ptep , va , pa , sz , prot );
329
376
}
330
377
331
378
/*
 * With a real PMD level, the PGD's next level is the PMD and all
 * next-level helpers dispatch through pt_ops' PMD hooks.
 */
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
/* PMD folded: the PGD's next level is the PTE table directly. */
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
@@ -421,6 +468,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
421
468
BUG_ON ((load_pa % map_size ) != 0 );
422
469
BUG_ON (load_sz > MAX_EARLY_MAPPING_SIZE );
423
470
471
+ pt_ops .alloc_pte = alloc_pte_early ;
472
+ pt_ops .get_pte_virt = get_pte_virt_early ;
473
+ #ifndef __PAGETABLE_PMD_FOLDED
474
+ pt_ops .alloc_pmd = alloc_pmd_early ;
475
+ pt_ops .get_pmd_virt = get_pmd_virt_early ;
476
+ #endif
424
477
/* Setup early PGD for fixmap */
425
478
create_pgd_mapping (early_pg_dir , FIXADDR_START ,
426
479
(uintptr_t )fixmap_pgd_next , PGDIR_SIZE , PAGE_TABLE );
@@ -497,9 +550,16 @@ static void __init setup_vm_final(void)
497
550
phys_addr_t pa , start , end ;
498
551
struct memblock_region * reg ;
499
552
500
- /* Set mmu_enabled flag */
501
- mmu_enabled = true;
502
-
553
+ /**
554
+ * MMU is enabled at this point. But page table setup is not complete yet.
555
+ * fixmap page table alloc functions should be used at this point
556
+ */
557
+ pt_ops .alloc_pte = alloc_pte_fixmap ;
558
+ pt_ops .get_pte_virt = get_pte_virt_fixmap ;
559
+ #ifndef __PAGETABLE_PMD_FOLDED
560
+ pt_ops .alloc_pmd = alloc_pmd_fixmap ;
561
+ pt_ops .get_pmd_virt = get_pmd_virt_fixmap ;
562
+ #endif
503
563
/* Setup swapper PGD for fixmap */
504
564
create_pgd_mapping (swapper_pg_dir , FIXADDR_START ,
505
565
__pa_symbol (fixmap_pgd_next ),
@@ -533,6 +593,14 @@ static void __init setup_vm_final(void)
533
593
/* Move to swapper page table */
534
594
csr_write (CSR_SATP , PFN_DOWN (__pa_symbol (swapper_pg_dir )) | SATP_MODE );
535
595
local_flush_tlb_all ();
596
+
597
+ /* generic page allocation functions must be used to setup page table */
598
+ pt_ops .alloc_pte = alloc_pte_late ;
599
+ pt_ops .get_pte_virt = get_pte_virt_late ;
600
+ #ifndef __PAGETABLE_PMD_FOLDED
601
+ pt_ops .alloc_pmd = alloc_pmd_late ;
602
+ pt_ops .get_pmd_virt = get_pmd_virt_late ;
603
+ #endif
536
604
}
537
605
#else
538
606
asmlinkage void __init setup_vm (uintptr_t dtb_pa )
0 commit comments