@@ -296,7 +296,7 @@ static void __init setup_bootmem(void)
 }
 
 #ifdef CONFIG_MMU
-struct pt_alloc_ops pt_ops __initdata;
+struct pt_alloc_ops pt_ops __meminitdata;
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
@@ -358,7 +358,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
 	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
 }
 
-static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
+static inline pte_t *__meminit get_pte_virt_late(phys_addr_t pa)
 {
 	return (pte_t *) __va(pa);
 }
@@ -377,17 +377,16 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t __init alloc_pte_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
 
 	BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
 	return __pa((pte_t *)ptdesc_address(ptdesc));
 }
 
-static void __init create_pte_mapping(pte_t *ptep,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	uintptr_t pte_idx = pte_index(va);
 
@@ -441,7 +440,7 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
 	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
 }
 
-static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
+static pmd_t *__meminit get_pmd_virt_late(phys_addr_t pa)
 {
 	return (pmd_t *) __va(pa);
 }
@@ -458,17 +457,17 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t __init alloc_pmd_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
 
 	BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
 	return __pa((pmd_t *)ptdesc_address(ptdesc));
 }
 
-static void __init create_pmd_mapping(pmd_t *pmdp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pmd_mapping(pmd_t *pmdp,
+					 uintptr_t va, phys_addr_t pa,
+					 phys_addr_t sz, pgprot_t prot)
 {
 	pte_t *ptep;
 	phys_addr_t pte_phys;
@@ -504,7 +503,7 @@ static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
 	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
 }
 
-static pud_t *__init get_pud_virt_late(phys_addr_t pa)
+static pud_t *__meminit get_pud_virt_late(phys_addr_t pa)
 {
 	return (pud_t *)__va(pa);
 }
@@ -522,7 +521,7 @@ static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_pud_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pud_late(uintptr_t va)
 {
 	unsigned long vaddr;
 
@@ -542,7 +541,7 @@ static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
 	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
 }
 
-static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
+static p4d_t *__meminit get_p4d_virt_late(phys_addr_t pa)
 {
 	return (p4d_t *)__va(pa);
 }
@@ -560,7 +559,7 @@ static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_p4d_late(uintptr_t va)
+static phys_addr_t __meminit alloc_p4d_late(uintptr_t va)
 {
 	unsigned long vaddr;
 
@@ -569,9 +568,8 @@ static phys_addr_t alloc_p4d_late(uintptr_t va)
 	return __pa(vaddr);
 }
 
-static void __init create_pud_mapping(pud_t *pudp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pud_mapping(pud_t *pudp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	pmd_t *nextp;
 	phys_addr_t next_phys;
@@ -596,9 +594,8 @@ static void __init create_pud_mapping(pud_t *pudp,
 	create_pmd_mapping(nextp, va, pa, sz, prot);
 }
 
-static void __init create_p4d_mapping(p4d_t *p4dp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_p4d_mapping(p4d_t *p4dp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	pud_t *nextp;
 	phys_addr_t next_phys;
@@ -654,9 +651,8 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
 #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-void __init create_pgd_mapping(pgd_t *pgdp,
-	       uintptr_t va, phys_addr_t pa,
-	       phys_addr_t sz, pgprot_t prot)
+void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+				  pgprot_t prot)
 {
 	pgd_next_t *nextp;
 	phys_addr_t next_phys;
@@ -681,8 +677,7 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 	create_pgd_next_mapping(nextp, va, pa, sz, prot);
 }
 
-static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
-				      phys_addr_t size)
+static uintptr_t __meminit best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size)
 {
 	if (debug_pagealloc_enabled())
 		return PAGE_SIZE;
@@ -718,7 +713,7 @@ asmlinkage void __init __copy_data(void)
 #endif
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-static __init pgprot_t pgprot_from_va(uintptr_t va)
+static __meminit pgprot_t pgprot_from_va(uintptr_t va)
 {
 	if (is_va_kernel_text(va))
 		return PAGE_KERNEL_READ_EXEC;
@@ -743,7 +738,7 @@ void mark_rodata_ro(void)
 			  set_memory_ro);
 }
 #else
-static __init pgprot_t pgprot_from_va(uintptr_t va)
+static __meminit pgprot_t pgprot_from_va(uintptr_t va)
 {
 	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
 		return PAGE_KERNEL;
@@ -1235,9 +1230,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	pt_ops_set_fixmap();
 }
 
-static void __init create_linear_mapping_range(phys_addr_t start,
-					       phys_addr_t end,
-					       uintptr_t fixed_map_size)
+static void __meminit create_linear_mapping_range(phys_addr_t start, phys_addr_t end,
+						  uintptr_t fixed_map_size)
 {
 	phys_addr_t pa;
 	uintptr_t va, map_size;