Commit cb2a023

KAGA-KOKO authored and suryasaimadhu committed
x86/cr4: Sanitize CR4.PCE update
load_mm_cr4_irqsoff() is really a strange name for a function which has
only one purpose: Update the CR4.PCE bit depending on the perf state.

Rename it to cr4_update_pce_mm(), move it into the tlb code and provide
a function which can be invoked by the perf smp function calls.

Another step to remove exposure of cpu_tlbstate.

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent: d8f0b35
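A note on the new shape (not part of the patch text): cr4_update_pce() takes a void pointer it ignores because on_each_cpu() and on_each_cpu_mask() expect an smp_call_func_t callback, which they run on every targeted CPU with interrupts disabled. A minimal illustrative sketch of that contract, with hypothetical names:

#include <linux/smp.h>

/* smp_call_func_t is void (*)(void *info). cr4_update_pce() adopts this
 * shape so perf can hand it directly to the cross-call helpers. The
 * callback runs on each targeted CPU with IRQs disabled, which is why
 * the *_irqsoff CR4 helpers and a plain this_cpu_read() of
 * cpu_tlbstate.loaded_mm are safe inside it. */
static void pce_demo_func(void *info)
{
	/* per-CPU work runs here, IRQs off */
}

static void pce_demo_kick(void)
{
	/* last argument (wait == 1): return only after all CPUs ran it */
	on_each_cpu(pce_demo_func, NULL, 1);
}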

File tree: 3 files changed (+25, -22)

  arch/x86/events/core.c
  arch/x86/include/asm/mmu_context.h
  arch/x86/mm/tlb.c

arch/x86/events/core.c (3 additions, 8 deletions)

@@ -2162,11 +2162,6 @@ static int x86_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static void refresh_pce(void *ignored)
-{
-	load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2185,7 +2180,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	lockdep_assert_held_write(&mm->mmap_sem);
 
 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2195,7 +2190,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
 		return;
 
 	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static int x86_pmu_event_idx(struct perf_event *event)
@@ -2253,7 +2248,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	else if (x86_pmu.attr_rdpmc == 2)
 		static_branch_dec(&rdpmc_always_available_key);
 
-	on_each_cpu(refresh_pce, NULL, 1);
+	on_each_cpu(cr4_update_pce, NULL, 1);
 	x86_pmu.attr_rdpmc = val;
 }
 
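For context on what these hooks gate: mmapping a perf event is what drives mm->context.perf_rdpmc_allowed, and CR4.PCE is what lets the rdpmc instruction execute outside ring 0. A hedged user-space sketch of that path (illustrative, not part of this patch; the fields follow the perf_event_mmap_page ABI, and a production reader would also take the pc->lock seqcount loop):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static uint64_t rdpmc(uint32_t ecx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(ecx));
	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* This mmap() reaches x86_pmu_event_mapped() above, which bumps
	 * perf_rdpmc_allowed and cross-calls cr4_update_pce() on all CPUs
	 * currently running this mm. */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	if (pc->cap_user_rdpmc && pc->index)
		printf("instructions: %llu\n", (unsigned long long)
		       (pc->offset + rdpmc(pc->index - 1)));
	return 0;
}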

arch/x86/include/asm/mmu_context.h (1 addition, 13 deletions)

@@ -24,21 +24,9 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 #endif /* !CONFIG_PARAVIRT_XXL */
 
 #ifdef CONFIG_PERF_EVENTS
-
 DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
-
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
-{
-	if (static_branch_unlikely(&rdpmc_always_available_key) ||
-	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
-	     atomic_read(&mm->context.perf_rdpmc_allowed)))
-		cr4_set_bits_irqsoff(X86_CR4_PCE);
-	else
-		cr4_clear_bits_irqsoff(X86_CR4_PCE);
-}
-#else
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
+void cr4_update_pce(void *ignored);
 #endif
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL

arch/x86/mm/tlb.c (21 additions, 1 deletion)

@@ -272,6 +272,26 @@ static void cond_ibpb(struct task_struct *next)
 	}
 }
 
+#ifdef CONFIG_PERF_EVENTS
+static inline void cr4_update_pce_mm(struct mm_struct *mm)
+{
+	if (static_branch_unlikely(&rdpmc_always_available_key) ||
+	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
+	     atomic_read(&mm->context.perf_rdpmc_allowed)))
+		cr4_set_bits_irqsoff(X86_CR4_PCE);
+	else
+		cr4_clear_bits_irqsoff(X86_CR4_PCE);
+}
+
+void cr4_update_pce(void *ignored)
+{
+	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
+}
+
+#else
+static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
+#endif
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
@@ -440,7 +460,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 
 	if (next != real_prev) {
-		load_mm_cr4_irqsoff(next);
+		cr4_update_pce_mm(next);
 		switch_ldt(real_prev, next);
 	}
 }
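The decision in cr4_update_pce_mm() encodes the three rdpmc modes that set_attr_rdpmc() in events/core.c switches between via the two static keys. A hedged restatement as a pure function (hypothetical helper, not in the patch; 0/1/2 are the values of the /sys/devices/cpu/rdpmc knob backing x86_pmu.attr_rdpmc):

/* 0 == never, 1 == default (per-mm), 2 == always */
static bool want_cr4_pce(int attr_rdpmc, int perf_rdpmc_allowed)
{
	if (attr_rdpmc == 2)		/* rdpmc_always_available_key */
		return true;
	if (attr_rdpmc == 0)		/* rdpmc_never_available_key */
		return false;
	return perf_rdpmc_allowed > 0;	/* mm has mapped rdpmc events */
}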
