
Commit e5c35fa

AlexGhiti authored and palmer-dabbelt committed

riscv: Map the kernel with correct permissions the first time
For 64-bit kernels, we map all the kernel with write and execute permissions
and afterwards remove writability from text and executability from data.

For 32-bit kernels, the kernel mapping resides in the linear mapping, so we
map all the linear mapping as writable and executable and afterwards we
remove those properties for unused memory and kernel mapping as described
above.

Change this behavior to directly map the kernel with correct permissions
and avoid going through the whole mapping to fix the permissions.

At the same time, this fixes an issue introduced by commit 2bfc6cd
("riscv: Move kernel mapping outside of linear mapping") as reported at
starfive-tech/linux#17.

Signed-off-by: Alexandre Ghiti <[email protected]>
Reviewed-by: Anup Patel <[email protected]>
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent c10bc26 commit e5c35fa
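The heart of the patch is the pgprot_from_va() helper added to arch/riscv/mm/init.c below: each virtual address gets its final protection the first time it is mapped, instead of the old map-everything-writable-and-executable-then-fix-it-up sequence. A minimal userspace sketch of that single-pass idea, using hypothetical section boundaries rather than the kernel's real linker symbols:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical text-section boundaries; the kernel derives these from
 * linker symbols such as _start and __init_data_begin. */
static const uintptr_t text_start = 0x80000000UL;
static const uintptr_t text_end   = 0x80200000UL;

enum prot { PROT_RX, PROT_RW };   /* simplified page protections */

/* Old flow (removed by this commit): map everything writable+executable,
 * then strip the excess permissions with protect_kernel_text_data().
 * New flow (sketched here): pick the final protection per address. */
static enum prot prot_from_va(uintptr_t va)
{
        if (va >= text_start && va < text_end)
                return PROT_RX;   /* kernel text: read + execute, never write */
        return PROT_RW;           /* data: read + write, never execute */
}

int main(void)
{
        printf("text page: %s\n", prot_from_va(0x80001000UL) == PROT_RX ? "RX" : "RW");
        printf("data page: %s\n", prot_from_va(0x80300000UL) == PROT_RX ? "RX" : "RW");
        return 0;
}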

File tree: 5 files changed (+82 −81)

arch/riscv/include/asm/page.h

Lines changed: 12 additions & 1 deletion

@@ -95,6 +95,7 @@ extern unsigned long va_kernel_pa_offset;
 #endif
 extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset	0
@@ -108,6 +109,11 @@ extern unsigned long pfn_base;
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
 #define kernel_mapping_pa_to_va(y)	({					\
 	unsigned long _y = y;							\
@@ -127,10 +133,15 @@ extern unsigned long kernel_virt_addr;
 
 #define __va_to_pa_nodebug(x)	({						\
 	unsigned long _x = x;							\
-	(_x < kernel_virt_addr) ?						\
+	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)	((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)	((unsigned long)(x) - va_pa_offset)
 #endif /* CONFIG_64BIT */
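One way to read the new 64-bit predicates: the linear mapping and the kernel mapping are now disjoint virtual ranges, and __va_to_pa_nodebug() dispatches on is_linear_mapping() instead of a bare comparison against kernel_virt_addr. A self-contained sketch of that dispatch; every constant below is illustrative, not the real RISC-V layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout constants only. */
static const uintptr_t demo_page_offset   = 0xffffffe000000000UL; /* PAGE_OFFSET */
static const uintptr_t demo_kernel_virt   = 0xffffffff80000000UL; /* kernel_virt_addr */
static const uintptr_t demo_load_sz       = 0x00800000UL;         /* 8 MiB image */
static const uintptr_t demo_linear_offset = 0xffffffdf80000000UL; /* va_pa_offset */
static const uintptr_t demo_kernel_offset = 0xffffffff00000000UL; /* va_kernel_pa_offset */

/* Mirrors is_linear_mapping()/is_kernel_mapping() from the hunk above. */
static int is_linear(uintptr_t va)
{
        return va >= demo_page_offset && va < demo_kernel_virt;
}

static int is_kernel(uintptr_t va)
{
        return va >= demo_kernel_virt && va < demo_kernel_virt + demo_load_sz;
}

/* Mirrors the reworked __va_to_pa_nodebug(): choose the offset by range. */
static uintptr_t va_to_pa(uintptr_t va)
{
        return is_linear(va) ? va - demo_linear_offset : va - demo_kernel_offset;
}

int main(void)
{
        uintptr_t va = demo_kernel_virt + 0x1000;

        printf("in kernel mapping: %d, pa = %#lx\n",
               is_kernel(va), (unsigned long)va_to_pa(va));
        return 0;
}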

arch/riscv/include/asm/sections.h

Lines changed: 17 additions & 0 deletions

@@ -6,11 +6,28 @@
 #define __ASM_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
 extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */

arch/riscv/include/asm/set_memory.h

Lines changed: 0 additions & 8 deletions

@@ -16,7 +16,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
 static __always_inline int set_kernel_memory(char *startp, char *endp,
					      int (*set_memory)(unsigned long start,
								int num_pages))
@@ -32,7 +31,6 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
 static inline int set_kernel_memory(char *startp, char *endp,
				     int (*set_memory)(unsigned long start,
@@ -42,12 +40,6 @@ static inline int set_kernel_memory(char *startp, char *endp,
 }
 #endif
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
-#endif
-
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
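With both protect_* helpers gone, set_kernel_memory() is the one range helper the callers keep, handing it section symbols (often via lm_alias()) plus a set_memory_* callback. Its body is not shown in this hunk, so the page-count computation below is an assumption about the usual pattern; everything DEMO_-prefixed is a stand-in:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

/* Stand-in for a set_memory_* callback such as set_memory_ro(). */
static int demo_set_memory(unsigned long start, int num_pages)
{
        printf("change %d page(s) starting at %#lx\n", num_pages, start);
        return 0;
}

/* Assumed shape of set_kernel_memory(): turn a [startp, endp) symbol
 * range into a page count and delegate to the callback. */
static int demo_set_kernel_memory(char *startp, char *endp,
                                  int (*set_memory)(unsigned long, int))
{
        unsigned long start = (unsigned long)(uintptr_t)startp;
        unsigned long end = (unsigned long)(uintptr_t)endp;

        return set_memory(start,
                          (int)(DEMO_PAGE_ALIGN(end - start) >> DEMO_PAGE_SHIFT));
}

int main(void)
{
        static char region[3 * DEMO_PAGE_SIZE]; /* pretend section */

        return demo_set_kernel_memory(region, region + sizeof(region),
                                      demo_set_memory);
}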

arch/riscv/kernel/setup.c

Lines changed: 3 additions & 9 deletions

@@ -291,11 +291,6 @@ void __init setup_arch(char **cmdline_p)
 	init_resources();
 	sbi_init();
 
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
-		protect_kernel_text_data();
-		protect_kernel_linear_mapping_text_rodata();
-	}
-
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(1);
 #endif
@@ -334,11 +329,10 @@ subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
-	unsigned long init_begin = (unsigned long)__init_begin;
-	unsigned long init_end = (unsigned long)__init_end;
-
 	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+				  IS_ENABLED(CONFIG_64BIT) ?
+					set_memory_rw : set_memory_rw_nx);
 
 	free_initmem_default(POISON_FREE_INITMEM);
 }

arch/riscv/mm/init.c

Lines changed: 50 additions & 63 deletions

@@ -436,6 +436,43 @@ asmlinkage void __init __copy_data(void)
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (is_va_kernel_text(va))
+		return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
+	 * we must protect its linear mapping alias from being executed and
+	 * written.
+	 * And rodata section is marked readonly in mark_rodata_ro.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+		return PAGE_KERNEL_READ;
+
+	return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+	set_kernel_memory(__start_rodata, _data, set_memory_ro);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+				  set_memory_ro);
+
+	debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+		return PAGE_KERNEL;
+
+	return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
@@ -454,7 +491,8 @@ asmlinkage void __init __copy_data(void)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+uintptr_t load_sz;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa	(*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz	(*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -465,7 +503,8 @@ uintptr_t xiprom, xiprom_sz;
 #define xiprom_sz	(*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom	(*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
@@ -484,15 +523,18 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
				   map_size, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    bool early)
 {
 	uintptr_t va, end_va;
 
 	end_va = kernel_virt_addr + load_sz;
 	for (va = kernel_virt_addr; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   load_pa + (va - kernel_virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   map_size,
+				   early ?
+					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
@@ -569,7 +611,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
-	create_kernel_page_table(early_pg_dir, map_size);
+	create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
@@ -645,22 +687,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
-	unsigned long text_start = (unsigned long)lm_alias(_start);
-	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-	unsigned long data_start = (unsigned long)lm_alias(_data);
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
 	uintptr_t va, map_size;
@@ -693,21 +719,15 @@
		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
-			create_pgd_mapping(swapper_pg_dir, va, pa,
-					   map_size,
-#ifdef CONFIG_64BIT
-					   PAGE_KERNEL
-#else
-					   PAGE_KERNEL_EXEC
-#endif
-					   );
 
+			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+					   pgprot_from_va(va));
		}
	}
 
 #ifdef CONFIG_64BIT
	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
	/* Clear fixmap PTE and PMD mappings */
@@ -738,39 +758,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-	unsigned long text_start = (unsigned long)_start;
-	unsigned long init_text_start = (unsigned long)__init_text_begin;
-	unsigned long init_data_start = (unsigned long)__init_data_begin;
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
-	unsigned long end_va = kernel_virt_addr + load_sz;
-#else
-	unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-	/* rodata section is marked readonly in mark_rodata_ro */
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-	debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
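A note on the new early flag: setup_vm() runs from head.S with the MMU off and before relocation (see the #error in the hunk above), so the first-pass page table keeps the permissive PAGE_KERNEL_EXEC mapping, and setup_vm_final() rebuilds the mapping with the definitive protections from pgprot_from_va(). A compressed userspace sketch of that two-pass split; the protections and bounds are stand-ins, not the kernel's real values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum prot { PROT_RWX, PROT_RX, PROT_RW };       /* simplified */

/* Stand-in for pgprot_from_va() with hypothetical text bounds. */
static enum prot prot_from_va(uintptr_t va)
{
        return (va >= 0x80000000UL && va < 0x80200000UL) ? PROT_RX : PROT_RW;
}

/* Mirrors the 'early' parameter added to create_kernel_page_table():
 * the setup_vm() pass maps everything permissively, while the
 * setup_vm_final() pass asks for the definitive protection. */
static enum prot prot_for_kernel_page(uintptr_t va, bool early)
{
        return early ? PROT_RWX : prot_from_va(va);
}

int main(void)
{
        uintptr_t va = 0x80001000UL;

        printf("early pass: %d, final pass: %d\n",
               prot_for_kernel_page(va, true), prot_for_kernel_page(va, false));
        return 0;
}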
