
Commit 650400b

Ingo Molnar authored and committed

Merge branch 'upstream-x86-selftests' into WIP.x86/pti.base

Conflicts:
	arch/x86/kernel/cpu/Makefile

Signed-off-by: Ingo Molnar <[email protected]>

2 parents 0fd2e9c + fec8f5a, commit 650400b

File tree

19 files changed: +613, -522 lines

Documentation/x86/x86_64/mm.txt

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
 ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
 ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
-ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
+ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...

arch/x86/Kconfig

Lines changed: 0 additions & 1 deletion
@@ -303,7 +303,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
-	default 0xdff8000000000000 if X86_5LEVEL
 	default 0xdffffc0000000000
 
 config HAVE_INTEL_TXT
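
With the single shadow offset retained here, the upper bound of the KASAN region documented in Documentation/x86/x86_64/mm.txt above can be re-derived (my own arithmetic, assuming the usual x86 KASAN scaling of one shadow byte per 8 bytes of address space, shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET):

    shadow_end = (2^64 >> 3) + 0xdffffc0000000000
               = 0x2000000000000000 + 0xdffffc0000000000
               = 0xfffffc0000000000

which matches the new end of the kasan shadow range for both 4- and 5-level paging; sharing that end is the point of dropping the X86_5LEVEL-specific default.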

arch/x86/entry/syscalls/Makefile

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-out := $(obj)/../../include/generated/asm
-uapi := $(obj)/../../include/generated/uapi/asm
+out := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
 # Create output directory if not already present
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \

arch/x86/include/asm/cpufeatures.h

Lines changed: 272 additions & 273 deletions
Large diffs are not rendered by default.

arch/x86/include/asm/pgtable_types.h

Lines changed: 1 addition & 2 deletions
@@ -200,10 +200,9 @@ enum page_cache_mode {
 
 #define _PAGE_ENC	(_AT(pteval_t, sme_me_mask))
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
 			 _PAGE_DIRTY | _PAGE_ENC)
+#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)
 
 #define __PAGE_KERNEL_ENC	(__PAGE_KERNEL | _PAGE_ENC)
 #define __PAGE_KERNEL_ENC_WP	(__PAGE_KERNEL_WP | _PAGE_ENC)
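
The macro shuffle is meant to be a pure refactor; expanding the new definition by hand (my own check, not part of the patch):

    _PAGE_TABLE = _KERNPG_TABLE | _PAGE_USER
                = _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC
                  | _PAGE_USER

gives exactly the bit set of the removed _PAGE_TABLE definition, just expressed in terms of _KERNPG_TABLE.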

arch/x86/kernel/Makefile

Lines changed: 2 additions & 1 deletion
@@ -25,7 +25,8 @@ endif
 KASAN_SANITIZE_head$(BITS).o := n
 KASAN_SANITIZE_dumpstack.o := n
 KASAN_SANITIZE_dumpstack_$(BITS).o := n
-KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_paravirt.o := n
 
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y

arch/x86/kernel/cpu/cpuid-deps.c

Lines changed: 11 additions & 15 deletions
@@ -62,23 +62,19 @@ const static struct cpuid_dep cpuid_deps[] = {
 	{}
 };
 
-static inline void __clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit)
-{
-	clear_bit32(bit, c->x86_capability);
-}
-
-static inline void __setup_clear_cpu_cap(unsigned int bit)
-{
-	clear_cpu_cap(&boot_cpu_data, bit);
-	set_bit32(bit, cpu_caps_cleared);
-}
-
 static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature)
 {
-	if (!c)
-		__setup_clear_cpu_cap(feature);
-	else
-		__clear_cpu_cap(c, feature);
+	/*
+	 * Note: This could use the non atomic __*_bit() variants, but the
+	 * rest of the cpufeature code uses atomics as well, so keep it for
+	 * consistency. Cleanup all of it separately.
+	 */
+	if (!c) {
+		clear_cpu_cap(&boot_cpu_data, feature);
+		set_bit(feature, (unsigned long *)cpu_caps_cleared);
+	} else {
+		clear_bit(feature, (unsigned long *)c->x86_capability);
+	}
 }
 
 /* Take the capabilities and the BUG bits into account */
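
For context, the cpuid_deps[] table above drives transitive clearing: when one feature goes away, every feature that depends on it must be cleared too, repeated until the capability set stops changing. Below is a stand-alone sketch of that idea, not the kernel's exact code; the feature numbers and table entries are made up for illustration.

/* Stand-alone sketch of table-driven dependency clearing; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct dep { int feature; int depends_on; };

static const struct dep deps[] = {
	{ 2, 1 },	/* feature 2 needs feature 1 */
	{ 3, 2 },	/* feature 3 needs feature 2 */
	{ 0, 0 }	/* table terminator, like the {} entry in cpuid_deps[] */
};

static void clear_feature_and_dependents(bool caps[], int feature)
{
	bool changed;

	caps[feature] = false;
	do {	/* re-scan until no dependent feature is left enabled */
		changed = false;
		for (const struct dep *d = deps; d->feature; d++) {
			if (!caps[d->depends_on] && caps[d->feature]) {
				caps[d->feature] = false;
				changed = true;
			}
		}
	} while (changed);
}

int main(void)
{
	bool caps[4] = { false, true, true, true };	/* features 1..3 enabled */

	clear_feature_and_dependents(caps, 1);	/* clearing 1 also clears 2 and 3 */
	printf("%d %d %d\n", caps[1], caps[2], caps[3]);	/* prints: 0 0 0 */
	return 0;
}

The real code operates on the x86_capability bitmaps via clear_bit()/set_bit(), as the hunk above shows.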

arch/x86/kernel/head_64.S

Lines changed: 6 additions & 5 deletions
@@ -38,11 +38,12 @@
  *
  */
 
-#define p4d_index(x)	(((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
 #define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
 PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
 PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
+#endif
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
 	.text
@@ -362,10 +363,7 @@ NEXT_PAGE(early_dynamic_pgts)
 
 	.data
 
-#ifndef CONFIG_XEN
-NEXT_PAGE(init_top_pgt)
-	.fill	512,8,0
-#else
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
 NEXT_PAGE(init_top_pgt)
 	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org	init_top_pgt + PGD_PAGE_OFFSET*8, 0
@@ -382,6 +380,9 @@ NEXT_PAGE(level2_ident_pgt)
 	 * Don't set NX because code runs from these pages.
 	 */
 	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+#else
+NEXT_PAGE(init_top_pgt)
+	.fill	512,8,0
 #endif
 
 #ifdef CONFIG_X86_5LEVEL

arch/x86/mm/init_64.c

Lines changed: 5 additions & 5 deletions
@@ -1426,16 +1426,16 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
 void register_page_bootmem_memmap(unsigned long section_nr,
-		struct page *start_page, unsigned long size)
+		struct page *start_page, unsigned long nr_pages)
 {
 	unsigned long addr = (unsigned long)start_page;
-	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long end = (unsigned long)(start_page + nr_pages);
 	unsigned long next;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	unsigned int nr_pages;
+	unsigned int nr_pmd_pages;
 	struct page *page;
 
 	for (; addr < end; addr = next) {
@@ -1482,9 +1482,9 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 		if (pmd_none(*pmd))
 			continue;
 
-		nr_pages = 1 << (get_order(PMD_SIZE));
+		nr_pmd_pages = 1 << get_order(PMD_SIZE);
 		page = pmd_page(*pmd);
-		while (nr_pages--)
+		while (nr_pmd_pages--)
 			get_page_bootmem(section_nr, page++,
 					 SECTION_INFO);
 	}
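
An aside on the rename (my reading, not spelled out in the patch): the second argument counts struct page entries rather than bytes, and C pointer arithmetic already scales by the element size, so nr_pages is the honest name and frees the old local nr_pages from doing double duty. A tiny stand-alone illustration:

/* Stand-alone illustration: pointer arithmetic scales by the element size. */
#include <stdio.h>

struct page { unsigned long flags; void *priv; };	/* toy stand-in */

int main(void)
{
	struct page map[8];
	struct page *start_page = &map[0];
	unsigned long nr_pages = 8;

	/* end points one element past the last page, not 8 bytes past start */
	struct page *end = start_page + nr_pages;

	printf("span: %zu bytes\n",
	       (size_t)((char *)end - (char *)start_page));	/* 8 * sizeof(struct page) */
	return 0;
}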

arch/x86/mm/kasan_init_64.c

Lines changed: 80 additions & 21 deletions
@@ -16,6 +16,8 @@
 
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
+static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
 static int __init map_range(struct range *range)
 {
 	unsigned long start;
@@ -31,8 +33,10 @@ static void __init clear_pgds(unsigned long start,
 			unsigned long end)
 {
 	pgd_t *pgd;
+	/* See comment in kasan_init() */
+	unsigned long pgd_end = end & PGDIR_MASK;
 
-	for (; start < end; start += PGDIR_SIZE) {
+	for (; start < pgd_end; start += PGDIR_SIZE) {
 		pgd = pgd_offset_k(start);
 		/*
 		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -43,29 +47,61 @@ static void __init clear_pgds(unsigned long start,
 		else
 			pgd_clear(pgd);
 	}
+
+	pgd = pgd_offset_k(start);
+	for (; start < end; start += P4D_SIZE)
+		p4d_clear(p4d_offset(pgd, start));
+}
+
+static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
+{
+	unsigned long p4d;
+
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		return (p4d_t *)pgd;
+
+	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+	p4d += __START_KERNEL_map - phys_base;
+	return (p4d_t *)p4d + p4d_index(addr);
+}
+
+static void __init kasan_early_p4d_populate(pgd_t *pgd,
+		unsigned long addr,
+		unsigned long end)
+{
+	pgd_t pgd_entry;
+	p4d_t *p4d, p4d_entry;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+		set_pgd(pgd, pgd_entry);
+	}
+
+	p4d = early_p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (!p4d_none(*p4d))
+			continue;
+
+		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+		set_p4d(p4d, p4d_entry);
+	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
 
 static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
-	int i;
-	unsigned long start = KASAN_SHADOW_START;
+	/* See comment in kasan_init() */
+	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
 	unsigned long end = KASAN_SHADOW_END;
+	unsigned long next;
 
-	for (i = pgd_index(start); start < end; i++) {
-		switch (CONFIG_PGTABLE_LEVELS) {
-		case 4:
-			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
-					_KERNPG_TABLE);
-			break;
-		case 5:
-			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
-					_KERNPG_TABLE);
-			break;
-		default:
-			BUILD_BUG();
-		}
-		start += PGDIR_SIZE;
-	}
+	pgd += pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_early_p4d_populate(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
 }
 
 #ifdef CONFIG_KASAN_INLINE
@@ -102,7 +138,7 @@ void __init kasan_early_init(void)
 	for (i = 0; i < PTRS_PER_PUD; i++)
 		kasan_zero_pud[i] = __pud(pud_val);
 
-	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
@@ -118,12 +154,35 @@ void __init kasan_init(void)
 #endif
 
 	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+
+	/*
+	 * We use the same shadow offset for 4- and 5-level paging to
+	 * facilitate boot-time switching between paging modes.
+	 * As result in 5-level paging mode KASAN_SHADOW_START and
+	 * KASAN_SHADOW_END are not aligned to PGD boundary.
+	 *
+	 * KASAN_SHADOW_START doesn't share PGD with anything else.
+	 * We claim whole PGD entry to make things easier.
+	 *
+	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
+	 * bunch of things like kernel code, modules, EFI mapping, etc.
+	 * We need to take extra steps to not overwrite them.
+	 */
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		void *ptr;
+
+		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
+		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
+				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
+	}
+
 	load_cr3(early_top_pgt);
 	__flush_tlb_all();
 
-	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
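
The alignment problem the new kasan_init() comment describes can be checked with a little arithmetic (mine, not the patch's; it assumes PGDIR_SHIFT is 48 with 5-level paging and 39 with 4-level, and takes KASAN_SHADOW_END as the documented upper bound 0xfffffc0000000000):

    5-level: PGDIR_SIZE = 2^48 = 0x0001000000000000
             0xfffffc0000000000 mod PGDIR_SIZE = 0x0000fc0000000000   -> lands mid-PGD
    4-level: PGDIR_SIZE = 2^39 = 0x0000008000000000
             0xfffffc0000000000 mod PGDIR_SIZE = 0                    -> PGD-aligned

So only the 5-level case shares its last PGD entry with the kernel mappings, which is why the tmp_p4d_table fixup above is wrapped in IS_ENABLED(CONFIG_X86_5LEVEL).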
