Skip to content

Commit f154f29

Browse files
joergroedel authored and suryasaimadhu committed
x86/mm/64: Flush global TLB on boot and AP bringup
The AP bringup code uses the trampoline_pgd page-table which establishes global mappings in the user range of the address space. Flush the global TLB entries after the identity mappings are removed so no stale entries remain in the TLB. Signed-off-by: Joerg Roedel <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 9de4999 commit f154f29

File tree

4 files changed

+27
-7
lines changed

4 files changed

+27
-7
lines changed

arch/x86/include/asm/tlbflush.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -261,4 +261,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
261261

262262
#endif /* !MODULE */
263263

264+
/*
 * Flush the entire TLB, including global-page entries, by toggling CR4.PGE.
 * Clearing PGE invalidates global TLB entries; writing the original value
 * back re-enables global pages (see the callers' comments: used to flush
 * stale identity-map entries after a page-table switch).
 *
 * @cr4: current CR4 value — presumably read by the caller immediately
 *       beforehand (head64.c passes native_read_cr4()); TODO confirm PGE
 *       is set in it, otherwise the toggle sets rather than clears PGE.
 */
static inline void __native_tlb_flush_global(unsigned long cr4)
265+
{
266+
/* Toggle CR4.PGE off: this flushes global TLB entries */
native_write_cr4(cr4 ^ X86_CR4_PGE);
267+
/* Restore the original CR4 value (PGE back on) */
native_write_cr4(cr4);
268+
}
264269
#endif /* _ASM_X86_TLBFLUSH_H */

arch/x86/kernel/head64.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -483,6 +483,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
483483
/* Kill off the identity-map trampoline */
484484
reset_early_page_tables();
485485

486+
__native_tlb_flush_global(native_read_cr4());
487+
486488
clear_bss();
487489

488490
clear_page(init_top_pgt);

arch/x86/kernel/head_64.S

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,9 +166,26 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
166166
call sev_verify_cbit
167167
popq %rsi
168168

169-
/* Switch to new page-table */
169+
/*
170+
* Switch to new page-table
171+
*
172+
* For the boot CPU this switches to early_top_pgt which still has the
173+
identity mappings present. The secondary CPUs will switch to the
174+
* init_top_pgt here, away from the trampoline_pgd and unmap the
175+
identity-mapped ranges.
176+
*/
170177
movq %rax, %cr3
171178

179+
/*
180+
* Do a global TLB flush after the CR3 switch to make sure the TLB
181+
* entries from the identity mapping are flushed.
182+
*/
183+
movq %cr4, %rcx
184+
movq %rcx, %rax
185+
xorq $X86_CR4_PGE, %rcx
186+
movq %rcx, %cr4
187+
movq %rax, %cr4
188+
172189
/* Ensure I am executing from virtual addresses */
173190
movq $1f, %rax
174191
ANNOTATE_RETPOLINE_SAFE

arch/x86/mm/tlb.c

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1148,7 +1148,7 @@ void flush_tlb_one_user(unsigned long addr)
11481148
*/
11491149
STATIC_NOPV void native_flush_tlb_global(void)
11501150
{
1151-
unsigned long cr4, flags;
1151+
unsigned long flags;
11521152

11531153
if (static_cpu_has(X86_FEATURE_INVPCID)) {
11541154
/*
@@ -1168,11 +1168,7 @@ STATIC_NOPV void native_flush_tlb_global(void)
11681168
*/
11691169
raw_local_irq_save(flags);
11701170

1171-
cr4 = this_cpu_read(cpu_tlbstate.cr4);
1172-
/* toggle PGE */
1173-
native_write_cr4(cr4 ^ X86_CR4_PGE);
1174-
/* write old PGE again and flush TLBs */
1175-
native_write_cr4(cr4);
1171+
__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
11761172

11771173
raw_local_irq_restore(flags);
11781174
}

0 commit comments

Comments
 (0)