Skip to content

Commit 01112e5

Browse files
Merge branch 'riscv-wx-mappings' into for-next
This contains both the short-term fix for the W+X boot mappings and the larger cleanup.

* riscv-wx-mappings:
  riscv: Map the kernel with correct permissions the first time
  riscv: Introduce set_kernel_memory helper
  riscv: Simplify xip and !xip kernel address conversion macros
  riscv: Remove CONFIG_PHYS_RAM_BASE_FIXED
  riscv: mm: Fix W+X mappings at boot
2 parents 47513f2 + e5c35fa commit 01112e5

File tree

7 files changed

+102
-94
lines changed

7 files changed

+102
-94
lines changed

arch/riscv/Kconfig

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -494,13 +494,8 @@ config STACKPROTECTOR_PER_TASK
494494
def_bool y
495495
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
496496

497-
config PHYS_RAM_BASE_FIXED
498-
bool "Explicitly specified physical RAM address"
499-
default n
500-
501497
config PHYS_RAM_BASE
502498
hex "Platform Physical RAM address"
503-
depends on PHYS_RAM_BASE_FIXED
504499
default "0x80000000"
505500
help
506501
This is the physical address of RAM in the system. It has to be
@@ -513,7 +508,6 @@ config XIP_KERNEL
513508
# This prevents XIP from being enabled by all{yes,mod}config, which
514509
# fail to build since XIP doesn't support large kernels.
515510
depends on !COMPILE_TEST
516-
select PHYS_RAM_BASE_FIXED
517511
help
518512
Execute-In-Place allows the kernel to run from non-volatile storage
519513
directly addressable by the CPU, such as NOR flash. This saves RAM

arch/riscv/include/asm/page.h

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -83,55 +83,58 @@ extern unsigned long va_pa_offset;
8383
#ifdef CONFIG_64BIT
8484
extern unsigned long va_kernel_pa_offset;
8585
#endif
86-
#ifdef CONFIG_XIP_KERNEL
8786
extern unsigned long va_kernel_xip_pa_offset;
88-
#endif
8987
extern unsigned long pfn_base;
88+
extern uintptr_t load_sz;
9089
#define ARCH_PFN_OFFSET (pfn_base)
9190
#else
9291
#define va_pa_offset 0
9392
#ifdef CONFIG_64BIT
9493
#define va_kernel_pa_offset 0
9594
#endif
95+
#define va_kernel_xip_pa_offset 0
9696
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
9797
#endif /* CONFIG_MMU */
9898

9999
extern unsigned long kernel_virt_addr;
100100

101101
#ifdef CONFIG_64BIT
102+
#define is_kernel_mapping(x) \
103+
((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
104+
#define is_linear_mapping(x) \
105+
((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
106+
102107
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
103-
#ifdef CONFIG_XIP_KERNEL
104108
#define kernel_mapping_pa_to_va(y) ({ \
105109
unsigned long _y = y; \
106110
(_y >= CONFIG_PHYS_RAM_BASE) ? \
107111
(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
108112
(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
109113
})
110-
#else
111-
#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
112-
#endif
113114
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
114115

115116
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
116-
#ifdef CONFIG_XIP_KERNEL
117117
#define kernel_mapping_va_to_pa(y) ({ \
118118
unsigned long _y = y; \
119119
(_y < kernel_virt_addr + XIP_OFFSET) ? \
120120
((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
121121
((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
122122
})
123-
#else
124-
#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
125-
#endif
123+
126124
#define __va_to_pa_nodebug(x) ({ \
127125
unsigned long _x = x; \
128-
(_x < kernel_virt_addr) ? \
126+
is_linear_mapping(_x) ? \
129127
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
130128
})
131129
#else
130+
#define is_kernel_mapping(x) \
131+
((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
132+
#define is_linear_mapping(x) \
133+
((x) >= PAGE_OFFSET)
134+
132135
#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
133136
#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
134-
#endif
137+
#endif /* CONFIG_64BIT */
135138

136139
#ifdef CONFIG_DEBUG_VIRTUAL
137140
extern phys_addr_t __virt_to_phys(unsigned long x);

arch/riscv/include/asm/pgtable.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,8 @@
7777

7878
#ifdef CONFIG_XIP_KERNEL
7979
#define XIP_OFFSET SZ_8M
80+
#else
81+
#define XIP_OFFSET 0
8082
#endif
8183

8284
#ifndef __ASSEMBLY__

arch/riscv/include/asm/sections.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,28 @@
66
#define __ASM_SECTIONS_H
77

88
#include <asm-generic/sections.h>
9+
#include <linux/mm.h>
910

1011
extern char _start[];
1112
extern char _start_kernel[];
1213
extern char __init_data_begin[], __init_data_end[];
1314
extern char __init_text_begin[], __init_text_end[];
1415
extern char __alt_start[], __alt_end[];
1516

17+
static inline bool is_va_kernel_text(uintptr_t va)
18+
{
19+
uintptr_t start = (uintptr_t)_start;
20+
uintptr_t end = (uintptr_t)__init_data_begin;
21+
22+
return va >= start && va < end;
23+
}
24+
25+
static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
26+
{
27+
uintptr_t start = (uintptr_t)lm_alias(_start);
28+
uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
29+
30+
return va >= start && va < end;
31+
}
32+
1633
#endif /* __ASM_SECTIONS_H */

arch/riscv/include/asm/set_memory.h

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,20 +16,28 @@ int set_memory_rw(unsigned long addr, int numpages);
1616
int set_memory_x(unsigned long addr, int numpages);
1717
int set_memory_nx(unsigned long addr, int numpages);
1818
int set_memory_rw_nx(unsigned long addr, int numpages);
19-
void protect_kernel_text_data(void);
19+
static __always_inline int set_kernel_memory(char *startp, char *endp,
20+
int (*set_memory)(unsigned long start,
21+
int num_pages))
22+
{
23+
unsigned long start = (unsigned long)startp;
24+
unsigned long end = (unsigned long)endp;
25+
int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
26+
27+
return set_memory(start, num_pages);
28+
}
2029
#else
2130
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
2231
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
2332
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
2433
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
25-
static inline void protect_kernel_text_data(void) {}
2634
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
27-
#endif
28-
29-
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
30-
void __init protect_kernel_linear_mapping_text_rodata(void);
31-
#else
32-
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
35+
static inline int set_kernel_memory(char *startp, char *endp,
36+
int (*set_memory)(unsigned long start,
37+
int num_pages))
38+
{
39+
return 0;
40+
}
3341
#endif
3442

3543
int set_direct_map_invalid_noflush(struct page *page);

arch/riscv/kernel/setup.c

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -289,11 +289,6 @@ void __init setup_arch(char **cmdline_p)
289289
init_resources();
290290
sbi_init();
291291

292-
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
293-
protect_kernel_text_data();
294-
protect_kernel_linear_mapping_text_rodata();
295-
}
296-
297292
#ifdef CONFIG_KASAN
298293
kasan_init();
299294
#endif
@@ -328,11 +323,10 @@ subsys_initcall(topology_init);
328323

329324
void free_initmem(void)
330325
{
331-
unsigned long init_begin = (unsigned long)__init_begin;
332-
unsigned long init_end = (unsigned long)__init_end;
333-
334326
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
335-
set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
327+
set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
328+
IS_ENABLED(CONFIG_64BIT) ?
329+
set_memory_rw : set_memory_rw_nx);
336330

337331
free_initmem_default(POISON_FREE_INITMEM);
338332
}

arch/riscv/mm/init.c

Lines changed: 49 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -455,6 +455,43 @@ asmlinkage void __init __copy_data(void)
455455
}
456456
#endif
457457

458+
#ifdef CONFIG_STRICT_KERNEL_RWX
459+
static __init pgprot_t pgprot_from_va(uintptr_t va)
460+
{
461+
if (is_va_kernel_text(va))
462+
return PAGE_KERNEL_READ_EXEC;
463+
464+
/*
465+
* In 64-bit kernel, the kernel mapping is outside the linear mapping so
466+
* we must protect its linear mapping alias from being executed and
467+
* written.
468+
* And rodata section is marked readonly in mark_rodata_ro.
469+
*/
470+
if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
471+
return PAGE_KERNEL_READ;
472+
473+
return PAGE_KERNEL;
474+
}
475+
476+
void mark_rodata_ro(void)
477+
{
478+
set_kernel_memory(__start_rodata, _data, set_memory_ro);
479+
if (IS_ENABLED(CONFIG_64BIT))
480+
set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
481+
set_memory_ro);
482+
483+
debug_checkwx();
484+
}
485+
#else
486+
static __init pgprot_t pgprot_from_va(uintptr_t va)
487+
{
488+
if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
489+
return PAGE_KERNEL;
490+
491+
return PAGE_KERNEL_EXEC;
492+
}
493+
#endif /* CONFIG_STRICT_KERNEL_RWX */
494+
458495
/*
459496
* setup_vm() is called from head.S with MMU-off.
460497
*
@@ -474,7 +511,7 @@ asmlinkage void __init __copy_data(void)
474511
#endif
475512

476513
static uintptr_t load_pa __initdata;
477-
static uintptr_t load_sz __initdata;
514+
uintptr_t load_sz;
478515
#ifdef CONFIG_XIP_KERNEL
479516
#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
480517
#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -486,7 +523,8 @@ static uintptr_t xiprom_sz __initdata;
486523
#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
487524
#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
488525

489-
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
526+
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
527+
__always_unused bool early)
490528
{
491529
uintptr_t va, end_va;
492530

@@ -505,15 +543,18 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
505543
map_size, PAGE_KERNEL);
506544
}
507545
#else
508-
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
546+
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
547+
bool early)
509548
{
510549
uintptr_t va, end_va;
511550

512551
end_va = kernel_virt_addr + load_sz;
513552
for (va = kernel_virt_addr; va < end_va; va += map_size)
514553
create_pgd_mapping(pgdir, va,
515554
load_pa + (va - kernel_virt_addr),
516-
map_size, PAGE_KERNEL_EXEC);
555+
map_size,
556+
early ?
557+
PAGE_KERNEL_EXEC : pgprot_from_va(va));
517558
}
518559
#endif
519560

@@ -590,7 +631,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
590631
* us to reach paging_init(). We map all memory banks later
591632
* in setup_vm_final() below.
592633
*/
593-
create_kernel_page_table(early_pg_dir, map_size);
634+
create_kernel_page_table(early_pg_dir, map_size, true);
594635

595636
#ifndef __PAGETABLE_PMD_FOLDED
596637
/* Setup early PMD for DTB */
@@ -666,22 +707,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
666707
#endif
667708
}
668709

669-
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
670-
void __init protect_kernel_linear_mapping_text_rodata(void)
671-
{
672-
unsigned long text_start = (unsigned long)lm_alias(_start);
673-
unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
674-
unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
675-
unsigned long data_start = (unsigned long)lm_alias(_data);
676-
677-
set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
678-
set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
679-
680-
set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
681-
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
682-
}
683-
#endif
684-
685710
static void __init setup_vm_final(void)
686711
{
687712
uintptr_t va, map_size;
@@ -714,21 +739,15 @@ static void __init setup_vm_final(void)
714739
map_size = best_map_size(start, end - start);
715740
for (pa = start; pa < end; pa += map_size) {
716741
va = (uintptr_t)__va(pa);
717-
create_pgd_mapping(swapper_pg_dir, va, pa,
718-
map_size,
719-
#ifdef CONFIG_64BIT
720-
PAGE_KERNEL
721-
#else
722-
PAGE_KERNEL_EXEC
723-
#endif
724-
);
725742

743+
create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
744+
pgprot_from_va(va));
726745
}
727746
}
728747

729748
#ifdef CONFIG_64BIT
730749
/* Map the kernel */
731-
create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
750+
create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
732751
#endif
733752

734753
/* Clear fixmap PTE and PMD mappings */
@@ -759,35 +778,6 @@ static inline void setup_vm_final(void)
759778
}
760779
#endif /* CONFIG_MMU */
761780

762-
#ifdef CONFIG_STRICT_KERNEL_RWX
763-
void __init protect_kernel_text_data(void)
764-
{
765-
unsigned long text_start = (unsigned long)_start;
766-
unsigned long init_text_start = (unsigned long)__init_text_begin;
767-
unsigned long init_data_start = (unsigned long)__init_data_begin;
768-
unsigned long rodata_start = (unsigned long)__start_rodata;
769-
unsigned long data_start = (unsigned long)_data;
770-
unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
771-
772-
set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
773-
set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
774-
set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
775-
/* rodata section is marked readonly in mark_rodata_ro */
776-
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
777-
set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
778-
}
779-
780-
void mark_rodata_ro(void)
781-
{
782-
unsigned long rodata_start = (unsigned long)__start_rodata;
783-
unsigned long data_start = (unsigned long)_data;
784-
785-
set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
786-
787-
debug_checkwx();
788-
}
789-
#endif
790-
791781
#ifdef CONFIG_KEXEC_CORE
792782
/*
793783
* reserve_crashkernel() - reserves memory for crash kernel

0 commit comments

Comments (0)