Skip to content

Commit 25abe0d

Browse files
Alexandre Ghiti authored and Palmer Dabbelt committed
riscv: Fix kfence now that the linear mapping can be backed by PUD/P4D/PGD
The RISC-V Kfence implementation used to rely on the fact that the linear mapping was backed by at most PMD hugepages, which is not true anymore since commit 3335068 ("riscv: Use PUD/P4D/PGD pages for the linear mapping"). Instead of splitting PUD/P4D/PGD mappings afterwards, directly map the kfence pool region using PTE mappings by allocating this region before setup_vm_final(). Reported-by: [email protected] Closes: https://syzkaller.appspot.com/bug?extid=a74d57bddabbedd75135 Fixes: 3335068 ("riscv: Use PUD/P4D/PGD pages for the linear mapping") Signed-off-by: Alexandre Ghiti <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 6569fc1 commit 25abe0d

File tree

2 files changed

+30
-38
lines changed

2 files changed

+30
-38
lines changed

arch/riscv/include/asm/kfence.h

Lines changed: 0 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -8,41 +8,8 @@
88
#include <asm-generic/pgalloc.h>
99
#include <asm/pgtable.h>
1010

11-
static inline int split_pmd_page(unsigned long addr)
12-
{
13-
int i;
14-
unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
15-
pmd_t *pmd = pmd_off_k(addr);
16-
pte_t *pte = pte_alloc_one_kernel(&init_mm);
17-
18-
if (!pte)
19-
return -ENOMEM;
20-
21-
for (i = 0; i < PTRS_PER_PTE; i++)
22-
set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
23-
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
24-
25-
flush_tlb_kernel_range(addr, addr + PMD_SIZE);
26-
return 0;
27-
}
28-
2911
static inline bool arch_kfence_init_pool(void)
3012
{
31-
int ret;
32-
unsigned long addr;
33-
pmd_t *pmd;
34-
35-
for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
36-
addr += PAGE_SIZE) {
37-
pmd = pmd_off_k(addr);
38-
39-
if (pmd_leaf(*pmd)) {
40-
ret = split_pmd_page(addr);
41-
if (ret)
42-
return false;
43-
}
44-
}
45-
4613
return true;
4714
}
4815

arch/riscv/mm/init.c

Lines changed: 30 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#ifdef CONFIG_RELOCATABLE
2424
#include <linux/elf.h>
2525
#endif
26+
#include <linux/kfence.h>
2627

2728
#include <asm/fixmap.h>
2829
#include <asm/tlbflush.h>
@@ -1167,14 +1168,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
11671168
}
11681169

11691170
static void __init create_linear_mapping_range(phys_addr_t start,
1170-
phys_addr_t end)
1171+
phys_addr_t end,
1172+
uintptr_t fixed_map_size)
11711173
{
11721174
phys_addr_t pa;
11731175
uintptr_t va, map_size;
11741176

11751177
for (pa = start; pa < end; pa += map_size) {
11761178
va = (uintptr_t)__va(pa);
1177-
map_size = best_map_size(pa, end - pa);
1179+
map_size = fixed_map_size ? fixed_map_size :
1180+
best_map_size(pa, end - pa);
11781181

11791182
create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
11801183
pgprot_from_va(va));
@@ -1184,6 +1187,7 @@ static void __init create_linear_mapping_range(phys_addr_t start,
11841187
static void __init create_linear_mapping_page_table(void)
11851188
{
11861189
phys_addr_t start, end;
1190+
phys_addr_t kfence_pool __maybe_unused;
11871191
u64 i;
11881192

11891193
#ifdef CONFIG_STRICT_KERNEL_RWX
@@ -1197,6 +1201,19 @@ static void __init create_linear_mapping_page_table(void)
11971201
memblock_mark_nomap(krodata_start, krodata_size);
11981202
#endif
11991203

1204+
#ifdef CONFIG_KFENCE
1205+
/*
1206+
* kfence pool must be backed by PAGE_SIZE mappings, so allocate it
1207+
* before we setup the linear mapping so that we avoid using hugepages
1208+
* for this region.
1209+
*/
1210+
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
1211+
BUG_ON(!kfence_pool);
1212+
1213+
memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
1214+
__kfence_pool = __va(kfence_pool);
1215+
#endif
1216+
12001217
/* Map all memory banks in the linear mapping */
12011218
for_each_mem_range(i, &start, &end) {
12021219
if (start >= end)
@@ -1207,17 +1224,25 @@ static void __init create_linear_mapping_page_table(void)
12071224
if (end >= __pa(PAGE_OFFSET) + memory_limit)
12081225
end = __pa(PAGE_OFFSET) + memory_limit;
12091226

1210-
create_linear_mapping_range(start, end);
1227+
create_linear_mapping_range(start, end, 0);
12111228
}
12121229

12131230
#ifdef CONFIG_STRICT_KERNEL_RWX
1214-
create_linear_mapping_range(ktext_start, ktext_start + ktext_size);
1231+
create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
12151232
create_linear_mapping_range(krodata_start,
1216-
krodata_start + krodata_size);
1233+
krodata_start + krodata_size, 0);
12171234

12181235
memblock_clear_nomap(ktext_start, ktext_size);
12191236
memblock_clear_nomap(krodata_start, krodata_size);
12201237
#endif
1238+
1239+
#ifdef CONFIG_KFENCE
1240+
create_linear_mapping_range(kfence_pool,
1241+
kfence_pool + KFENCE_POOL_SIZE,
1242+
PAGE_SIZE);
1243+
1244+
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
1245+
#endif
12211246
}
12221247

12231248
static void __init setup_vm_final(void)

0 commit comments

Comments (0)