
Commit 2aeb073

aryabinin authored and Ingo Molnar committed
x86/mm/kasan: Don't use vmemmap_populate() to initialize shadow
[ Note, this is a Git cherry-pick of the following commit:

    d17a1d9: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")

  ... for easier x86 PTI code testing and back-porting. ]

The KASAN shadow is currently mapped using vmemmap_populate() since that
provides a semi-convenient way to map pages into init_top_pgt. However,
since that no longer zeroes the mapped pages, it is not suitable for KASAN,
which requires zeroed shadow memory.

Add a kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). Besides, this allows us to take advantage of gigantic
pages and use them to populate the shadow, which should save us some memory
wasted on page tables and reduce TLB pressure.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrey Ryabinin <[email protected]>
Signed-off-by: Pavel Tatashin <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Steven Sistare <[email protected]>
Cc: Daniel Jordan <[email protected]>
Cc: Bob Picco <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Sam Ravnborg <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 3382290 commit 2aeb073
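
For context (not part of this commit's diff): generic KASAN keeps one shadow byte per eight bytes of kernel address space, and a shadow byte of 0 means the whole 8-byte granule is accessible; any stale non-zero byte would be read as a poison marker, which is why the changelog insists on zeroed shadow memory. A minimal sketch of the shadow translation, assuming the kasan_mem_to_shadow() definition from include/linux/kasan.h of that era:

/*
 * Sketch for context: KASAN_SHADOW_SCALE_SHIFT is 3 on x86, so one
 * shadow byte describes eight bytes of address space, and a shadow
 * value of 0 marks those eight bytes as fully accessible -- hence the
 * requirement that freshly mapped shadow pages come up zeroed.
 */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}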

File tree

2 files changed (+137, -8 lines)


arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ config X86
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN			if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
arch/x86/mm/kasan_init_64.c

Lines changed: 136 additions & 7 deletions
@@ -4,12 +4,14 @@
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 
 #include <asm/e820/types.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -18,15 +20,142 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static int __init map_range(struct range *range)
+static __init void *early_alloc(size_t size, int nid)
+{
+	return memblock_virt_alloc_try_nid_nopanic(size, size,
+		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_PSE) &&
+		    ((end - addr) == PMD_SIZE) &&
+		    IS_ALIGNED(addr, PMD_SIZE)) {
+			p = early_alloc(PMD_SIZE, nid);
+			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PMD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pmd_populate_kernel(&init_mm, pmd, p);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t entry;
+		void *p;
+
+		if (!pte_none(*pte))
+			continue;
+
+		p = early_alloc(PAGE_SIZE, nid);
+		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, pte, entry);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none(*pud)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+		    ((end - addr) == PUD_SIZE) &&
+		    IS_ALIGNED(addr, PUD_SIZE)) {
+			p = early_alloc(PUD_SIZE, nid);
+			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PUD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pud_populate(&init_mm, pud, p);
+	}
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (!pmd_large(*pmd))
+			kasan_populate_pmd(pmd, addr, next, nid);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	if (p4d_none(*p4d)) {
+		void *p = early_alloc(PAGE_SIZE, nid);
+
+		p4d_populate(&init_mm, p4d, p);
+	}
+
+	pud = pud_offset(p4d, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (!pud_large(*pud))
+			kasan_populate_pud(pud, addr, next, nid);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	void *p;
+	p4d_t *p4d;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		p = early_alloc(PAGE_SIZE, nid);
+		pgd_populate(&init_mm, pgd, p);
+	}
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		kasan_populate_p4d(p4d, addr, next, nid);
+	} while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+					 int nid)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	addr = addr & PAGE_MASK;
+	end = round_up(end, PAGE_SIZE);
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_populate_pgd(pgd, addr, next, nid);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_range(struct range *range)
 {
 	unsigned long start;
 	unsigned long end;
 
 	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
 	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-	return vmemmap_populate(start, end, NUMA_NO_NODE);
+	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -189,16 +318,16 @@ void __init kasan_init(void)
 		if (pfn_mapped[i].end == 0)
 			break;
 
-		if (map_range(&pfn_mapped[i]))
-			panic("kasan: unable to allocate shadow!");
+		map_range(&pfn_mapped[i]);
 	}
+
 	kasan_populate_zero_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
-	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
-			(unsigned long)kasan_mem_to_shadow(_end),
-			NUMA_NO_NODE);
+	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+			      (unsigned long)kasan_mem_to_shadow(_end),
+			      early_pfn_to_nid(__pa(_stext)));
 
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 				   (void *)KASAN_SHADOW_END);