@@ -436,6 +436,43 @@ asmlinkage void __init __copy_data(void)
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (is_va_kernel_text(va))
+		return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
+	 * we must protect its linear mapping alias from being executed and
+	 * written.
+	 * And rodata section is marked readonly in mark_rodata_ro.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+		return PAGE_KERNEL_READ;
+
+	return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+	set_kernel_memory(__start_rodata, _data, set_memory_ro);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+				  set_memory_ro);
+
+	debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+		return PAGE_KERNEL;
+
+	return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
@@ -454,7 +491,8 @@ asmlinkage void __init __copy_data(void)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+uintptr_t load_sz;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa        (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz        (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -465,7 +503,8 @@ uintptr_t xiprom, xiprom_sz;
 #define xiprom_sz      (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom         (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
@@ -484,15 +523,18 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 				   map_size, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    bool early)
 {
 	uintptr_t va, end_va;
 
 	end_va = kernel_virt_addr + load_sz;
 	for (va = kernel_virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
 				   load_pa + (va - kernel_virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   map_size,
+				   early ?
+					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
@@ -569,7 +611,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * us to reach paging_init(). We map all memory banks later
 	 * in setup_vm_final() below.
 	 */
-	create_kernel_page_table(early_pg_dir, map_size);
+	create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup early PMD for DTB */
@@ -645,22 +687,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
-	unsigned long text_start = (unsigned long)lm_alias(_start);
-	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-	unsigned long data_start = (unsigned long)lm_alias(_data);
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
 	uintptr_t va, map_size;
@@ -693,21 +719,15 @@ static void __init setup_vm_final(void)
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
 			va = (uintptr_t)__va(pa);
-			create_pgd_mapping(swapper_pg_dir, va, pa,
-					   map_size,
-#ifdef CONFIG_64BIT
-					   PAGE_KERNEL
-#else
-					   PAGE_KERNEL_EXEC
-#endif
-					);
 
+			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+					   pgprot_from_va(va));
 		}
 	}
 
 #ifdef CONFIG_64BIT
 	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
 	/* Clear fixmap PTE and PMD mappings */
@@ -738,39 +758,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-	unsigned long text_start = (unsigned long)_start;
-	unsigned long init_text_start = (unsigned long)__init_text_begin;
-	unsigned long init_data_start = (unsigned long)__init_data_begin;
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
-	unsigned long end_va = kernel_virt_addr + load_sz;
-#else
-	unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-	/* rodata section is marked readonly in mark_rodata_ro */
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-	debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
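
Note (illustration, not part of the patch): with this change the final page protections are chosen per virtual address by pgprot_from_va() at the moment the mappings are created, instead of being tightened afterwards by protect_kernel_text_data() and protect_kernel_linear_mapping_text_rodata(); only the early pass (early == true) still maps the kernel PAGE_KERNEL_EXEC. The standalone C sketch below mimics that selection order. The type names, layout constants, and predicates are made up so the demo compiles and runs on its own; they are not the kernel's APIs.

/*
 * Illustrative sketch only, not kernel code: models the decision order
 * of the STRICT_KERNEL_RWX pgprot_from_va() above with stand-in types,
 * hypothetical layout constants, and mock predicates.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's pgprot_t values (hypothetical). */
typedef enum {
	KPROT_READ_EXEC,	/* like PAGE_KERNEL_READ_EXEC: kernel text */
	KPROT_READ,		/* like PAGE_KERNEL_READ: linear alias of text */
	KPROT_READ_WRITE	/* like PAGE_KERNEL: everything else */
} kprot_t;

/* Hypothetical layout constants for the demo address space. */
#define KERNEL_TEXT_START	0xffffffff80000000ULL
#define KERNEL_TEXT_END		0xffffffff80400000ULL
#define LM_ALIAS_TEXT_START	0xffffffe000000000ULL
#define LM_ALIAS_TEXT_END	0xffffffe000400000ULL

/* Mock of is_va_kernel_text(): is va inside the kernel text mapping? */
static bool demo_is_va_kernel_text(uint64_t va)
{
	return va >= KERNEL_TEXT_START && va < KERNEL_TEXT_END;
}

/* Mock of is_va_kernel_lm_alias_text(): linear-mapping alias of the text? */
static bool demo_is_va_kernel_lm_alias_text(uint64_t va)
{
	return va >= LM_ALIAS_TEXT_START && va < LM_ALIAS_TEXT_END;
}

/* Same decision order as pgprot_from_va() in the hunk above. */
static kprot_t demo_prot_from_va(uint64_t va, bool is_64bit)
{
	if (demo_is_va_kernel_text(va))
		return KPROT_READ_EXEC;

	/*
	 * On 64-bit, the kernel runs from its own mapping, so the linear
	 * mapping alias of the text must not stay writable or executable.
	 */
	if (is_64bit && demo_is_va_kernel_lm_alias_text(va))
		return KPROT_READ;

	return KPROT_READ_WRITE;
}

int main(void)
{
	const char *names[] = { "READ_EXEC", "READ", "READ_WRITE" };
	uint64_t samples[] = {
		KERNEL_TEXT_START,	/* kernel text          */
		LM_ALIAS_TEXT_START,	/* linear alias of text */
		0xffffffe010000000ULL,	/* ordinary linear map  */
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%llx -> %s\n",
		       (unsigned long long)samples[i],
		       names[demo_prot_from_va(samples[i], true)]);
	return 0;
}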