
Commit af5c40c

KAGA-KOKO authored and suryasaimadhu committed
x86/tlb: Uninline nmi_uaccess_okay()
cpu_tlbstate is exported because various TLB-related functions need access to it, but cpu_tlbstate is sensitive information which should only be accessed by well-contained kernel functions and not be directly exposed to modules.

nmi_uaccess_okay() is the last inline function which requires access to cpu_tlbstate. Move it into the TLB code.

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 96f59fe commit af5c40c
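This commit works toward no longer needing cpu_tlbstate to be visible outside the core TLB code: instead of exporting sensitive per-CPU data so that inline helpers in headers can read it, keep the data private to one translation unit and expose only an out-of-line function. A hypothetical sketch of that general pattern (foo_state and foo_state_ready are invented names for illustration, not kernel APIs):

/* In foo.c: the per-CPU state never leaves this file. */
static DEFINE_PER_CPU(bool, foo_state);		/* hypothetical state */

bool foo_state_ready(void)			/* hypothetical accessor */
{
	return this_cpu_read(foo_state);
}

/* In foo.h: modules and other code see only the declaration. */
bool foo_state_ready(void);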

File tree

2 files changed: +33 / -32 lines


arch/x86/include/asm/tlbflush.h

Lines changed: 1 addition & 32 deletions

@@ -247,38 +247,7 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-/*
- * Blindly accessing user memory from NMI context can be dangerous
- * if we're in the middle of switching the current user task or
- * switching the loaded mm. It can also be dangerous if we
- * interrupted some kernel code that was temporarily using a
- * different mm.
- */
-static inline bool nmi_uaccess_okay(void)
-{
-	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-	struct mm_struct *current_mm = current->mm;
-
-	VM_WARN_ON_ONCE(!loaded_mm);
-
-	/*
-	 * The condition we want to check is
-	 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
-	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
-	 * is supposed to be reasonably fast.
-	 *
-	 * Instead, we check the almost equivalent but somewhat conservative
-	 * condition below, and we rely on the fact that switch_mm_irqs_off()
-	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
-	 */
-	if (loaded_mm != current_mm)
-		return false;
-
-	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
-
-	return true;
-}
-
+bool nmi_uaccess_okay(void);
 #define nmi_uaccess_okay nmi_uaccess_okay
 
 void cr4_update_irqsoff(unsigned long set, unsigned long clear);
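The #define nmi_uaccess_okay nmi_uaccess_okay line that survives the hunk is the usual kernel convention for signalling that an architecture overrides a generic default: the common header tests whether the macro is defined and falls back to a trivial implementation otherwise. A minimal sketch of that fallback pattern (the generic side lives in include/linux/uaccess.h; the exact surrounding code there may differ):

#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true	/* generic default: always allowed */
#endif

Because x86 defines the macro in tlbflush.h, generic callers pick up the out-of-line x86 implementation instead of the always-true default.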

arch/x86/mm/tlb.c

Lines changed: 32 additions & 0 deletions

@@ -1094,6 +1094,38 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	put_cpu();
 }
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm. It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+bool nmi_uaccess_okay(void)
+{
+	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+	struct mm_struct *current_mm = current->mm;
+
+	VM_WARN_ON_ONCE(!loaded_mm);
+
+	/*
+	 * The condition we want to check is
+	 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
+	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+	 * is supposed to be reasonably fast.
+	 *
+	 * Instead, we check the almost equivalent but somewhat conservative
+	 * condition below, and we rely on the fact that switch_mm_irqs_off()
+	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+	 */
+	if (loaded_mm != current_mm)
+		return false;
+
+	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+	return true;
+}
+
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
 {
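For context on how the moved function is consumed: NMI-context code checks nmi_uaccess_okay() before touching user memory and gives up rather than risk reading through a stale or mid-switch CR3. A simplified sketch, modeled loosely on copy_from_user_nmi() in arch/x86/lib/usercopy.c (the function name and exact checks here are illustrative, not a verbatim copy):

/* Sketch: read user memory from NMI context only when the loaded mm
 * provably matches current->mm. Illustrative only. */
unsigned long read_user_from_nmi(void *to, const void __user *from,
				 unsigned long n)
{
	unsigned long ret;

	if (!nmi_uaccess_okay())	/* CR3 may not match current->mm */
		return n;		/* report zero bytes copied */

	pagefault_disable();		/* faulting is not allowed in NMI */
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;			/* bytes NOT copied, per convention */
}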
