
Commit 2efad17

AlexGhiti authored and palmer-dabbelt committed
riscv: Split early kasan mapping to prepare sv48 introduction
Now that the KASAN shadow region is next to the kernel, for sv48 this region won't be aligned on PGDIR_SIZE, so when populating it we will need to get down to lower levels of the page table. So instead of reimplementing the page table walk for the early population, take advantage of the existing functions used for the final population.

Note that the KASAN swapper initialization must also be split out, since memblock is not initialized at this point and, as the last PGD is shared with the kernel, we would need to allocate a PUD; so postpone the KASAN final population until after the kernel population is done.

Signed-off-by: Alexandre Ghiti <[email protected]>
Signed-off-by: Palmer Dabbelt <[email protected]>
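The constraint at work is the top-level test visible in kasan_populate_pgd() in the diff below: a PGD entry can map the shadow directly only if the region starts on a PGDIR_SIZE boundary and spans at least PGDIR_SIZE, and PGDIR_SIZE grows from 1 GiB under sv39 to 512 GiB under sv48. A minimal user-space sketch of that check (the shift values and the sample shadow bounds are illustrative stand-ins, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values: one top-level (PGD) entry covers 1 GiB under sv39
 * and 512 GiB under sv48. */
#define PGDIR_SHIFT_SV39 30
#define PGDIR_SHIFT_SV48 39

/* Mirrors the IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE
 * test in kasan_populate_pgd(): only then can a whole PGD entry be used. */
static int pgd_mappable(uint64_t vaddr, uint64_t end, int pgdir_shift)
{
	uint64_t pgdir_size = 1ULL << pgdir_shift;

	return !(vaddr & (pgdir_size - 1)) && (end - vaddr) >= pgdir_size;
}

int main(void)
{
	/* Hypothetical shadow region bounds, chosen only to show the effect. */
	uint64_t start = 0xffffffc800000000ULL, end = 0xffffffd800000000ULL;

	printf("sv39: whole PGD entries usable? %d\n",
	       pgd_mappable(start, end, PGDIR_SHIFT_SV39));
	printf("sv48: whole PGD entries usable? %d\n",
	       pgd_mappable(start, end, PGDIR_SHIFT_SV48));
	return 0;
}

For such a region the sv39 test passes while the sv48 test fails, which is why the walk must be able to descend to PUD/PMD level.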
1 parent f7ae023 commit 2efad17

3 files changed: 67 additions (+), 51 deletions (−)


arch/riscv/include/asm/kasan.h

Lines changed: 1 addition & 0 deletions
@@ -34,6 +34,7 @@
 
 void kasan_init(void);
 asmlinkage void kasan_early_init(void);
+void kasan_swapper_init(void);
 
 #endif
 #endif

arch/riscv/mm/init.c

Lines changed: 4 additions & 0 deletions
@@ -741,6 +741,10 @@ static void __init setup_vm_final(void)
 	create_kernel_page_table(swapper_pg_dir, false);
 #endif
 
+#ifdef CONFIG_KASAN
+	kasan_swapper_init();
+#endif
+
 	/* Clear fixmap PTE and PMD mappings */
 	clear_fixmap(FIX_PTE);
 	clear_fixmap(FIX_PMD);

arch/riscv/mm/kasan_init.c

Lines changed: 62 additions & 51 deletions
@@ -12,44 +12,6 @@
 #include <asm/pgalloc.h>
 
 extern pgd_t early_pg_dir[PTRS_PER_PGD];
-asmlinkage void __init kasan_early_init(void)
-{
-	uintptr_t i;
-	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
-
-	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
-		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
-
-	for (i = 0; i < PTRS_PER_PTE; ++i)
-		set_pte(kasan_early_shadow_pte + i,
-			mk_pte(virt_to_page(kasan_early_shadow_page),
-			       PAGE_KERNEL));
-
-	for (i = 0; i < PTRS_PER_PMD; ++i)
-		set_pmd(kasan_early_shadow_pmd + i,
-			pfn_pmd(PFN_DOWN
-				(__pa((uintptr_t) kasan_early_shadow_pte)),
-				__pgprot(_PAGE_TABLE)));
-
-	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
-	     i += PGDIR_SIZE, ++pgd)
-		set_pgd(pgd,
-			pfn_pgd(PFN_DOWN
-				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
-				__pgprot(_PAGE_TABLE)));
-
-	/* init for swapper_pg_dir */
-	pgd = pgd_offset_k(KASAN_SHADOW_START);
-
-	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
-	     i += PGDIR_SIZE, ++pgd)
-		set_pgd(pgd,
-			pfn_pgd(PFN_DOWN
-				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
-				__pgprot(_PAGE_TABLE)));
-
-	local_flush_tlb_all();
-}
 
 static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
 {
@@ -108,39 +70,88 @@ static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned
 	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
 }
 
-static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pgd(pgd_t *pgdp,
+				      unsigned long vaddr, unsigned long end,
+				      bool early)
 {
 	phys_addr_t phys_addr;
-	pgd_t *pgdp = pgd_offset_k(vaddr);
 	unsigned long next;
 
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		/*
-		 * pgdp can't be none since kasan_early_init initialized all KASAN
-		 * shadow region with kasan_early_shadow_pmd: if this is still the case,
-		 * that means we can try to allocate a hugepage as a replacement.
-		 */
-		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
-		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
-			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
-			if (phys_addr) {
-				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
+			if (early) {
+				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
+				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
 				continue;
+			} else if (pgd_page_vaddr(*pgdp) ==
+				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
+				/*
+				 * pgdp can't be none since kasan_early_init
+				 * initialized all KASAN shadow region with
+				 * kasan_early_shadow_pud: if this is still the
+				 * case, that means we can try to allocate a
+				 * hugepage as a replacement.
+				 */
+				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+				if (phys_addr) {
+					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+					continue;
+				}
 			}
 		}
 
 		kasan_populate_pmd(pgdp, vaddr, next);
 	} while (pgdp++, vaddr = next, vaddr != end);
 }
 
+asmlinkage void __init kasan_early_init(void)
+{
+	uintptr_t i;
+
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_early_shadow_pte + i,
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+			       PAGE_KERNEL));
+
+	for (i = 0; i < PTRS_PER_PMD; ++i)
+		set_pmd(kasan_early_shadow_pmd + i,
+			pfn_pmd(PFN_DOWN
+				(__pa((uintptr_t)kasan_early_shadow_pte)),
+				PAGE_TABLE));
+
+	if (pgtable_l4_enabled) {
+		for (i = 0; i < PTRS_PER_PUD; ++i)
+			set_pud(kasan_early_shadow_pud + i,
+				pfn_pud(PFN_DOWN
+					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+					PAGE_TABLE));
+	}
+
+	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
+			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+
+	local_flush_tlb_all();
+}
+
+void __init kasan_swapper_init(void)
+{
+	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
+			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+
+	local_flush_tlb_all();
+}
+
 static void __init kasan_populate(void *start, void *end)
 {
 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
 
-	kasan_populate_pgd(vaddr, vend);
+	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);
 
 	local_flush_tlb_all();
 	memset(start, KASAN_SHADOW_INIT, end - start);
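Taken together, the diff arranges the population in three passes: kasan_early_init() seeds the shared early-shadow tables and runs kasan_populate_pgd() on early_pg_dir with early=true, which only installs pointers to the preallocated early shadow (no allocator is available yet); kasan_swapper_init() repeats that early-style pass on swapper_pg_dir from setup_vm_final(); and the final kasan_populate() call passes early=false, at which point entries still pointing at the early shadow are swapped for memblock-allocated hugepages where alignment allows. A toy user-space model of that early/final branch (every name, type, and the flow itself are simplified stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the early/final split in kasan_populate_pgd(). */
enum entry { EMPTY, EARLY_SHADOW, REAL_HUGEPAGE };

static void populate_pgd(enum entry *pgd, int n, bool early)
{
	for (int i = 0; i < n; i++) {
		if (early) {
			/* Early pass: no allocator yet, so point every
			 * covered entry at the shared early shadow table. */
			pgd[i] = EARLY_SHADOW;
		} else if (pgd[i] == EARLY_SHADOW) {
			/* Final pass: an entry still carrying the early
			 * shadow is replaced by a real allocation
			 * (memblock_phys_alloc() in the actual code). */
			pgd[i] = REAL_HUGEPAGE;
		}
	}
}

int main(void)
{
	enum entry pgd[4] = { EMPTY };

	populate_pgd(pgd, 4, true);   /* kasan_early_init()-style pass */
	populate_pgd(pgd, 4, false);  /* final kasan_populate()-style pass */

	for (int i = 0; i < 4; i++)
		printf("pgd[%d] = %d\n", i, pgd[i]);
	return 0;
}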
