
Commit 6eb82f9

joergroedel authored and Ingo Molnar committed
x86/mm: Pre-allocate P4D/PUD pages for vmalloc area
Pre-allocate the page-table pages for the vmalloc area at the level which needs synchronization on x86-64, which is P4D for 5-level and PUD for 4-level paging.

Doing this at boot makes sure no synchronization of that area is necessary at runtime. The synchronization takes the pgd_lock and iterates over all page-tables in the system, so it can take quite long and is better avoided.

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 92ed301 commit 6eb82f9
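For context, the loop added in the diff below walks the vmalloc range one top-level (PGD) entry at a time via ALIGN(addr + 1, PGDIR_SIZE). A minimal, self-contained sketch of that stride arithmetic is shown here; it assumes the usual 4-level x86-64 PGDIR_SHIFT of 39 and uses a placeholder range, not the kernel's actual VMALLOC_START/VMALLOC_END:

#include <stdio.h>

/* Illustrative constants; the kernel derives these from the active paging mode. */
#define PGDIR_SHIFT	39				/* 4-level x86-64: one PGD entry maps 512 GiB */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* round x up to a multiple of a */

int main(void)
{
	/* Placeholder range spanning four PGD entries, not the real vmalloc bounds. */
	unsigned long start = 0xffffc90000000000UL;
	unsigned long end   = start + 4 * PGDIR_SIZE - 1;
	unsigned long addr;

	/* Same stride as the patch: exactly one visit per PGD entry covering the range. */
	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE))
		printf("would pre-allocate under PGD entry covering %#lx\n", addr);

	return 0;
}

As the commit message notes, only the level that would otherwise need synchronizing across all page-tables is allocated per visited entry: P4D with 5-level paging, PUD with 4-level paging.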

File tree

1 file changed: 52 additions, 0 deletions

arch/x86/mm/init_64.c

Lines changed: 52 additions & 0 deletions
@@ -1238,6 +1238,56 @@ static void __init register_page_bootmem_info(void)
 #endif
 }
 
+/*
+ * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
+ * Only the level which needs to be synchronized between all page-tables is
+ * allocated because the synchronization can be expensive.
+ */
+static void __init preallocate_vmalloc_pages(void)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_none(*p4d)) {
+			/* Can only happen with 5-level paging */
+			p4d = p4d_alloc(&init_mm, pgd, addr);
+			if (!p4d) {
+				lvl = "p4d";
+				goto failed;
+			}
+		}
+
+		if (pgtable_l5_enabled())
+			continue;
+
+		pud = pud_offset(p4d, addr);
+		if (pud_none(*pud)) {
+			/* Ends up here only with 4-level paging */
+			pud = pud_alloc(&init_mm, p4d, addr);
+			if (!pud) {
+				lvl = "pud";
+				goto failed;
+			}
+		}
+	}
+
+	return;
+
+failed:
+
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
+}
+
 void __init mem_init(void)
 {
 	pci_iommu_alloc();
@@ -1261,6 +1311,8 @@ void __init mem_init(void)
 	if (get_gate_vma(&init_mm))
 		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
+	preallocate_vmalloc_pages();
+
 	mem_init_print_info(NULL);
 }
