@@ -54,7 +54,7 @@ struct pt_alloc_ops {
 #endif
 };
 
-static phys_addr_t dma32_phys_limit __ro_after_init;
+static phys_addr_t dma32_phys_limit __initdata;
 
 static void __init zone_sizes_init(void)
 {
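
This hunk demotes dma32_phys_limit from __ro_after_init to __initdata: the value is only consumed during boot (setup_bootmem() writes it, zone_sizes_init() reads it, both __init), so after free_initmem() the variable vanishes entirely instead of occupying a read-only page forever. A minimal sketch of what the two annotations buy, assuming the mainline definitions in include/linux/init.h and include/linux/cache.h; the variable names and example_setup() below are hypothetical:

#include <linux/init.h>		/* __initdata => __section(".init.data"), freed by free_initmem() */
#include <linux/cache.h>	/* __ro_after_init => __section(".data..ro_after_init") */
#include <linux/types.h>
#include <linux/printk.h>

static phys_addr_t boot_only_limit __initdata;		/* reclaimed after boot */
static unsigned long write_once_value __ro_after_init;	/* kept, then write-protected */

static int __init example_setup(void)
{
	boot_only_limit = 0x80000000UL;	/* fine: still in the init phase */
	write_once_value = 42;		/* fine: pages not yet marked R/O */
	pr_info("limit=%pa value=%lu\n", &boot_only_limit, write_once_value);
	return 0;
}
early_initcall(example_setup);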
@@ -173,7 +173,7 @@ static void __init setup_bootmem(void)
 }
 
 #ifdef CONFIG_MMU
-static struct pt_alloc_ops _pt_ops __ro_after_init;
+static struct pt_alloc_ops _pt_ops __initdata;
 
 #ifdef CONFIG_XIP_KERNEL
 #define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
@@ -189,13 +189,13 @@ EXPORT_SYMBOL(va_pa_offset);
 #endif
 /* Offset between kernel mapping virtual address and kernel load address */
 #ifdef CONFIG_64BIT
-unsigned long va_kernel_pa_offset;
+unsigned long va_kernel_pa_offset __ro_after_init;
 EXPORT_SYMBOL(va_kernel_pa_offset);
 #endif
 #ifdef CONFIG_XIP_KERNEL
 #define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
 #endif
-unsigned long va_kernel_xip_pa_offset;
+unsigned long va_kernel_xip_pa_offset __ro_after_init;
 EXPORT_SYMBOL(va_kernel_xip_pa_offset);
 #ifdef CONFIG_XIP_KERNEL
 #define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
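
These two offsets get __ro_after_init rather than __initdata because the kernel-image address translation helpers read them for the whole uptime, yet they are written exactly once, in setup_vm(). A sketch of that write-once pattern; compute_offsets() is a hypothetical stand-in for the assignment setup_vm() performs, and KERNEL_LINK_ADDR is assumed from asm/page.h:

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
#include <asm/page.h>

unsigned long va_kernel_pa_offset __ro_after_init;
EXPORT_SYMBOL(va_kernel_pa_offset);

static void __init compute_offsets(uintptr_t load_pa)
{
	/* The only store ever made to this variable. After the boot-time
	 * mark_readonly() pass its page is R/O, so a stray later write
	 * faults loudly instead of silently corrupting address translation. */
	va_kernel_pa_offset = KERNEL_LINK_ADDR - load_pa;
}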
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(pfn_base);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
-pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
@@ -242,7 +242,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
 	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
 }
 
-static inline pte_t *get_pte_virt_late(phys_addr_t pa)
+static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
 {
 	return (pte_t *) __va(pa);
 }
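
Tagging get_pte_virt_late() __init is safe because it is only reachable from boot-time page-table setup, itself __init; the tag moves its text into .init.text so free_initmem() reclaims it, and it is self-policing, since modpost flags any call from non-init code as a section mismatch. A compilable sketch of the pattern, where map_one_page() is a hypothetical caller:

#include <linux/init.h>
#include <linux/pfn.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* Discarded with the rest of .init.text once boot completes. */
static pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);	/* the linear map is live by now */
}

/* __init caller -> __init callee: no section mismatch warning. */
static void __init map_one_page(phys_addr_t pa)
{
	pte_t *ptep = get_pte_virt_late(pa);

	set_pte(ptep, pfn_pte(PFN_DOWN(pa), PAGE_KERNEL));
}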
@@ -261,7 +261,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_pte_late(uintptr_t va)
+static phys_addr_t __init alloc_pte_late(uintptr_t va)
 {
 	unsigned long vaddr;
 
@@ -285,10 +285,10 @@ static void __init create_pte_mapping(pte_t *ptep,
 
 #ifndef __PAGETABLE_PMD_FOLDED
 
-pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
-pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
-pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
-pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
 #ifdef CONFIG_XIP_KERNEL
 #define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
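
These pmd tables are referenced only inside init.c, so internal linkage is the honest declaration and it silences sparse's "symbol was not declared. Should it be static?" check. Note the asymmetry in the annotations, sketched below assuming the mainline definitions in include/linux/linkage.h: __page_aligned_bss already bundles page alignment with .bss placement, whereas a table placed in .init.data must request alignment explicitly:

#include <linux/init.h>
#include <linux/linkage.h>	/* __page_aligned_bss: .bss..page_aligned + __aligned(PAGE_SIZE) */
#include <asm/pgtable.h>

/* Lives for the whole uptime: the fixmap is used at runtime. */
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

/* Boot-only scaffolding, reclaimed by free_initmem(); alignment must be
 * stated explicitly because __initdata carries no alignment of its own. */
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);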
@@ -308,7 +308,7 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
 	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
 }
 
-static pmd_t *get_pmd_virt_late(phys_addr_t pa)
+static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
 {
 	return (pmd_t *) __va(pa);
 }
@@ -325,7 +325,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_pmd_late(uintptr_t va)
+static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 {
 	unsigned long vaddr;
 
@@ -443,14 +443,16 @@ asmlinkage void __init __copy_data(void)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+static uintptr_t load_sz __initdata;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
 #endif
 
 #ifdef CONFIG_XIP_KERNEL
-uintptr_t xiprom, xiprom_sz;
+static uintptr_t xiprom __initdata;
+static uintptr_t xiprom_sz __initdata;
 #define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
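
The define-then-shadow pairs above are the XIP idiom this file relies on: each object is defined once, then a macro with the same name rewrites every later reference to go through XIP_FIXUP(), which rebases the address onto the writable RAM copy of .data (XIP code executes from flash, where the link-time .data addresses are wrong for stores). A sketch, assuming XIP_FIXUP() from arch/riscv/include/asm/pgtable.h; record_load_pa() is hypothetical:

#include <linux/init.h>
#include <linux/types.h>
#include <asm/pgtable.h>	/* XIP_FIXUP() */

static uintptr_t load_pa __initdata;

#ifdef CONFIG_XIP_KERNEL
/* A macro is not re-expanded inside its own expansion, so '&load_pa'
 * below still names the real object; every use elsewhere is fixed up. */
#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
#endif

static void __init record_load_pa(uintptr_t pa)
{
	load_pa = pa;	/* on XIP this stores through the fixed-up address */
}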
@@ -635,7 +637,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
+void __init protect_kernel_linear_mapping_text_rodata(void)
 {
 	unsigned long text_start = (unsigned long)lm_alias(_start);
 	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
@@ -843,7 +845,7 @@ static void __init reserve_crashkernel(void)
  * reserved once we call early_init_fdt_scan_reserved_mem()
  * later on.
  */
-static int elfcore_hdr_setup(struct reserved_mem *rmem)
+static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
 {
 	elfcorehdr_addr = rmem->base;
 	elfcorehdr_size = rmem->size;
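
__init is safe on elfcore_hdr_setup() because, as the comment above it notes, reserved-memory handlers run from early_init_fdt_scan_reserved_mem(), strictly during boot. For context, a sketch of the registration assumed to follow this function in the full file, mirroring the in-tree RESERVEDMEM_OF_DECLARE() usage:

#include <linux/init.h>
#include <linux/crash_dump.h>		/* elfcorehdr_addr, elfcorehdr_size */
#include <linux/of_reserved_mem.h>	/* struct reserved_mem, RESERVEDMEM_OF_DECLARE() */

static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
{
	elfcorehdr_addr = rmem->base;
	elfcorehdr_size = rmem->size;
	return 0;
}

/* Handlers registered this way are invoked while scanning the FDT's
 * reserved-memory nodes at early boot, i.e. before .init.text is freed,
 * which is what makes the __init tag on the handler correct. */
RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);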