Skip to content

Commit 4b04e6c

Browse files
KAGA-KOKO authored and suryasaimadhu committed
x86/tlb: Move __flush_tlb_all() out of line
Reduce the number of required exports to one and make flush_tlb_global() static to the TLB code.

flush_tlb_local() cannot be confined to the TLB code as the MTRR handling requires a PGE-less flush.

Suggested-by: Christoph Hellwig <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 29def59 commit 4b04e6c

File tree

2 files changed

+23
-29
lines changed

2 files changed

+23
-29
lines changed

arch/x86/include/asm/tlbflush.h

Lines changed: 1 addition & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -142,8 +142,8 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
142142

143143
struct flush_tlb_info;
144144

145+
void __flush_tlb_all(void);
145146
void flush_tlb_local(void);
146-
void flush_tlb_global(void);
147147
void flush_tlb_one_user(unsigned long addr);
148148
void flush_tlb_one_kernel(unsigned long addr);
149149
void flush_tlb_others(const struct cpumask *cpumask,
@@ -341,27 +341,6 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
341341

342342
extern void initialize_tlbstate_and_flush(void);
343343

344-
/*
345-
* flush everything
346-
*/
347-
static inline void __flush_tlb_all(void)
348-
{
349-
/*
350-
* This is to catch users with enabled preemption and the PGE feature
351-
* and don't trigger the warning in __native_flush_tlb().
352-
*/
353-
VM_WARN_ON_ONCE(preemptible());
354-
355-
if (boot_cpu_has(X86_FEATURE_PGE)) {
356-
flush_tlb_global();
357-
} else {
358-
/*
359-
* !PGE -> !PCID (setup_pcid()), thus every flush is total.
360-
*/
361-
flush_tlb_local();
362-
}
363-
}
364-
365344
#define TLB_FLUSH_ALL -1UL
366345

367346
/*

arch/x86/mm/tlb.c

Lines changed: 22 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1018,12 +1018,6 @@ STATIC_NOPV void native_flush_tlb_global(void)
10181018
raw_local_irq_restore(flags);
10191019
}
10201020

1021-
void flush_tlb_global(void)
1022-
{
1023-
__flush_tlb_global();
1024-
}
1025-
EXPORT_SYMBOL_GPL(flush_tlb_global);
1026-
10271021
/*
10281022
* Flush the entire current user mapping
10291023
*/
@@ -1046,7 +1040,28 @@ void flush_tlb_local(void)
10461040
{
10471041
__flush_tlb_local();
10481042
}
1049-
EXPORT_SYMBOL_GPL(flush_tlb_local);
1043+
1044+
/*
1045+
* Flush everything
1046+
*/
1047+
void __flush_tlb_all(void)
1048+
{
1049+
/*
1050+
* This is to catch users with enabled preemption and the PGE feature
1051+
* and don't trigger the warning in __native_flush_tlb().
1052+
*/
1053+
VM_WARN_ON_ONCE(preemptible());
1054+
1055+
if (boot_cpu_has(X86_FEATURE_PGE)) {
1056+
__flush_tlb_global();
1057+
} else {
1058+
/*
1059+
* !PGE -> !PCID (setup_pcid()), thus every flush is total.
1060+
*/
1061+
flush_tlb_local();
1062+
}
1063+
}
1064+
EXPORT_SYMBOL_GPL(__flush_tlb_all);
10501065

10511066
/*
10521067
* arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.

0 commit comments

Comments (0)