
Commit f4dd60a

Merge tag 'x86-mm-2020-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "Misc changes:

   - Unexport various PAT primitives

   - Unexport per-CPU tlbstate and uninline TLB helpers"

* tag 'x86-mm-2020-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  x86/tlb/uv: Add a forward declaration for struct flush_tlb_info
  x86/cpu: Export native_write_cr4() only when CONFIG_LKTDM=m
  x86/tlb: Restrict access to tlbstate
  xen/privcmd: Remove unneeded asm/tlb.h include
  x86/tlb: Move PCID helpers where they are used
  x86/tlb: Uninline nmi_uaccess_okay()
  x86/tlb: Move cr4_set_bits_and_update_boot() to the usage site
  x86/tlb: Move paravirt_tlb_remove_table() to the usage site
  x86/tlb: Move __flush_tlb_all() out of line
  x86/tlb: Move flush_tlb_others() out of line
  x86/tlb: Move __flush_tlb_one_kernel() out of line
  x86/tlb: Move __flush_tlb_one_user() out of line
  x86/tlb: Move __flush_tlb_global() out of line
  x86/tlb: Move __flush_tlb() out of line
  x86/alternatives: Move temporary_mm helpers into C
  x86/cr4: Sanitize CR4.PCE update
  x86/cpu: Uninline CR4 accessors
  x86/tlb: Uninline __get_current_cr3_fast()
  x86/mm: Use pgprotval_t in protval_4k_2_large() and protval_large_2_4k()
  x86/mm: Unexport __cachemode2pte_tbl
  ...
2 parents 435faf5 + bd1de2a commit f4dd60a
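
Most of these commits apply the same mechanical transformation: a static inline helper that dereferenced the per-CPU tlbstate is reduced to a plain declaration in its header, and the body moves into arch/x86/mm/tlb.c, so cpu_tlbstate itself no longer has to be visible (or exported) outside the core mm code. A minimal sketch of the pattern, with illustrative names (helper/some_field are placeholders, not kernel symbols):

/* before: header -- inlining forces cpu_tlbstate into every caller */
static inline void helper(void)
{
	this_cpu_write(cpu_tlbstate.some_field, 0);
}

/* after: the header keeps only the declaration ... */
void helper(void);

/* ... and arch/x86/mm/tlb.c carries the body, exported only if modules need it */
void helper(void)
{
	this_cpu_write(cpu_tlbstate.some_field, 0);
}
EXPORT_SYMBOL_GPL(helper);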

24 files changed: +608 −586 lines

arch/x86/events/core.c

Lines changed: 3 additions & 8 deletions
@@ -2166,11 +2166,6 @@ static int x86_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static void refresh_pce(void *ignored)
-{
-	load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2189,7 +2184,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	lockdep_assert_held_write(&mm->mmap_sem);
 
 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2199,7 +2194,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
 		return;
 
 	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static int x86_pmu_event_idx(struct perf_event *event)
@@ -2257,7 +2252,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	else if (x86_pmu.attr_rdpmc == 2)
 		static_branch_dec(&rdpmc_always_available_key);
 
-	on_each_cpu(refresh_pce, NULL, 1);
+	on_each_cpu(cr4_update_pce, NULL, 1);
 	x86_pmu.attr_rdpmc = val;
 }
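
The rename is not purely cosmetic: refresh_pce() was a thin wrapper around the load_mm_cr4_irqsoff() inline that is removed from mmu_context.h below. A plausible out-of-line replacement, assembled from the two removed helpers (a sketch only; the exact new body lives in the mm code, not in this diff):

/* sketch: refresh_pce() + load_mm_cr4_irqsoff() folded into one function */
void cr4_update_pce(void *ignored)
{
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
	     atomic_read(&mm->context.perf_rdpmc_allowed)))
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}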

arch/x86/include/asm/memtype.h

Lines changed: 3 additions & 0 deletions
@@ -24,4 +24,7 @@ extern void memtype_free_io(resource_size_t start, resource_size_t end);
 
 extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
 
+bool x86_has_pat_wp(void);
+enum page_cache_mode pgprot2cachemode(pgprot_t pgprot);
+
 #endif /* _ASM_X86_MEMTYPE_H */
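
pgprot2cachemode() was previously a static inline in pgtable_types.h (its removal is visible in that file's hunk below), and taking it out of line is what allows __pte2cachemode_tbl to stop being exported. A sketch of the out-of-line definition, reusing the removed inline body (x86_has_pat_wp() is new; its body is in one of the files not shown on this page):

/* sketch: the inline removed from pgtable_types.h, moved into mm code */
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;

	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}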

arch/x86/include/asm/mmu_context.h

Lines changed: 2 additions & 86 deletions
@@ -24,21 +24,9 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 #endif /* !CONFIG_PARAVIRT_XXL */
 
 #ifdef CONFIG_PERF_EVENTS
-
 DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
-
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
-{
-	if (static_branch_unlikely(&rdpmc_always_available_key) ||
-	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
-	     atomic_read(&mm->context.perf_rdpmc_allowed)))
-		cr4_set_bits_irqsoff(X86_CR4_PCE);
-	else
-		cr4_clear_bits_irqsoff(X86_CR4_PCE);
-}
-#else
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
+void cr4_update_pce(void *ignored);
 #endif
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
@@ -225,78 +213,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
-/*
- * This can be used from process context to figure out what the value of
- * CR3 is without needing to do a (slow) __read_cr3().
- *
- * It's intended to be used for code like KVM that sneakily changes CR3
- * and needs to restore it. It needs to be used very carefully.
- */
-static inline unsigned long __get_current_cr3_fast(void)
-{
-	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
-				      this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
-	/* For now, be very restrictive about when this can be called. */
-	VM_WARN_ON(in_nmi() || preemptible());
-
-	VM_BUG_ON(cr3 != __read_cr3());
-	return cr3;
-}
-
-typedef struct {
-	struct mm_struct *mm;
-} temp_mm_state_t;
-
-/*
- * Using a temporary mm allows to set temporary mappings that are not accessible
- * by other CPUs. Such mappings are needed to perform sensitive memory writes
- * that override the kernel memory protections (e.g., W^X), without exposing the
- * temporary page-table mappings that are required for these write operations to
- * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
- * mapping is torn down.
- *
- * Context: The temporary mm needs to be used exclusively by a single core. To
- *          harden security IRQs must be disabled while the temporary mm is
- *          loaded, thereby preventing interrupt handler bugs from overriding
- *          the kernel memory protection.
- */
-static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-{
-	temp_mm_state_t temp_state;
-
-	lockdep_assert_irqs_disabled();
-	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-	switch_mm_irqs_off(NULL, mm, current);
-
-	/*
-	 * If breakpoints are enabled, disable them while the temporary mm is
-	 * used. Userspace might set up watchpoints on addresses that are used
-	 * in the temporary mm, which would lead to wrong signals being sent or
-	 * crashes.
-	 *
-	 * Note that breakpoints are not disabled selectively, which also causes
-	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
-	 * undesirable, but still seems reasonable as the code that runs in the
-	 * temporary mm should be short.
-	 */
-	if (hw_breakpoint_active())
-		hw_breakpoint_disable();
-
-	return temp_state;
-}
-
-static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
-{
-	lockdep_assert_irqs_disabled();
-	switch_mm_irqs_off(NULL, prev_state.mm, current);
-
-	/*
-	 * Restore the breakpoints if they were disabled before the temporary mm
-	 * was loaded.
-	 */
-	if (hw_breakpoint_active())
-		hw_breakpoint_restore();
-}
+unsigned long __get_current_cr3_fast(void);
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
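
Per the shortlog ("x86/alternatives: Move temporary_mm helpers into C"), use_temporary_mm()/unuse_temporary_mm() become private to the alternatives code rather than disappearing. Their contract, spelled out in the removed comments, is exclusive single-CPU use with IRQs disabled. A hypothetical caller doing a protected text write would look roughly like this (poking_mm stands for the dedicated patching mm; treat this as a sketch of the calling convention, not kernel code):

/* sketch: calling convention for the temporary-mm helpers */
temp_mm_state_t prev;

local_irq_disable();			/* contract: IRQs off while loaded */
prev = use_temporary_mm(poking_mm);	/* also disables hw breakpoints */

/* ... write through a mapping only this CPU can see (e.g. W^X override) ... */

unuse_temporary_mm(prev);		/* restores previous mm + breakpoints */
local_irq_enable();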

arch/x86/include/asm/paravirt.h

Lines changed: 9 additions & 3 deletions
@@ -47,7 +47,13 @@ static inline void slow_down_io(void)
 #endif
 }
 
-static inline void __flush_tlb(void)
+void native_flush_tlb_local(void);
+void native_flush_tlb_global(void);
+void native_flush_tlb_one_user(unsigned long addr);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     const struct flush_tlb_info *info);
+
+static inline void __flush_tlb_local(void)
 {
 	PVOP_VCALL0(mmu.flush_tlb_user);
 }
@@ -62,8 +68,8 @@ static inline void __flush_tlb_one_user(unsigned long addr)
 	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
 }
 
-static inline void flush_tlb_others(const struct cpumask *cpumask,
-				    const struct flush_tlb_info *info)
+static inline void __flush_tlb_others(const struct cpumask *cpumask,
+				      const struct flush_tlb_info *info)
 {
 	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
 }
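
The double-underscore wrappers stay inline because they are just PVOP call sites; what moves out of line are the native implementations, which the new declarations above make callable as ordinary functions (e.g. as paravirt targets). A sketch of the out-of-line local flush, assuming it keeps the historical reload-CR3 behaviour of the old __native_flush_tlb() (not the exact kernel body):

/* sketch: out-of-line native TLB flush, usable as a paravirt target */
void native_flush_tlb_local(void)
{
	/* flush non-global TLB entries by reloading CR3 */
	native_write_cr3(__native_read_cr3());
}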

arch/x86/include/asm/pgtable_32.h

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ void sync_initial_page_table(void);
 #define kpte_clear_flush(ptep, vaddr)		\
 do {						\
 	pte_clear(&init_mm, (vaddr), (ptep));	\
-	__flush_tlb_one_kernel((vaddr));	\
+	flush_tlb_one_kernel((vaddr));		\
 } while (0)
 
 #endif /* !__ASSEMBLY__ */

arch/x86/include/asm/pgtable_types.h

Lines changed: 12 additions & 32 deletions
@@ -471,9 +471,6 @@ static inline pteval_t pte_flags(pte_t pte)
 	return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
 
-extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
-extern uint8_t __pte2cachemode_tbl[8];
-
 #define __pte2cm_idx(cb)				\
 	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
 	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
@@ -483,43 +480,26 @@ extern uint8_t __pte2cachemode_tbl[8];
 	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
 	 (((i) & 1) << _PAGE_BIT_PWT))
 
-static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
+unsigned long cachemode2protval(enum page_cache_mode pcm);
+
+static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
 {
-	if (likely(pcm == 0))
-		return 0;
-	return __cachemode2pte_tbl[pcm];
+	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
 }
-static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
+static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
-	return __pgprot(cachemode2protval(pcm));
+	return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
 }
-static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
 {
-	unsigned long masked;
-
-	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
-	if (likely(masked == 0))
-		return 0;
-	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
-}
-static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
-{
-	pgprotval_t val = pgprot_val(pgprot);
-	pgprot_t new;
-
-	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
-		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
-	return new;
+	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+		((val & _PAGE_PAT_LARGE) >>
+		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
 }
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
-	pgprotval_t val = pgprot_val(pgprot);
-	pgprot_t new;
-
-	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
-		((val & _PAGE_PAT_LARGE) >>
-		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
-	return new;
+	return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
 }
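
These conversions exist because the PAT bit changes position between page sizes: in a 4K PTE it sits at _PAGE_BIT_PAT (bit 7), while in a 2M/1G entry bit 7 is the PSE bit, so PAT is relocated to _PAGE_BIT_PAT_LARGE (bit 12). The helpers simply clear both PAT positions and shift the bit across the 5-place gap. A self-contained check of that arithmetic (the two bit positions are copied from the x86 headers; everything else is local to the example):

#include <assert.h>
#include <stdint.h>

#define _PAGE_BIT_PAT		7	/* PAT in a 4K PTE */
#define _PAGE_BIT_PAT_LARGE	12	/* PAT in a 2M/1G PTE (bit 7 is PSE) */
#define _PAGE_PAT		(1ULL << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE		(1ULL << _PAGE_BIT_PAT_LARGE)

typedef uint64_t pgprotval_t;

static pgprotval_t protval_4k_2_large(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}

static pgprotval_t protval_large_2_4k(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}

int main(void)
{
	/* PAT moves from bit 7 to bit 12 and back */
	assert(protval_4k_2_large(_PAGE_PAT) == _PAGE_PAT_LARGE);
	assert(protval_large_2_4k(_PAGE_PAT_LARGE) == _PAGE_PAT);
	/* a protval without PAT (0x63 = P|RW|A|D) passes through unchanged */
	assert(protval_4k_2_large(0x63) == 0x63);
	return 0;
}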