Skip to content

Commit a23eb2f

Browse files
Kan Liang authored and Peter Zijlstra committed
perf/x86/intel: Support the PEBS event mask
The current perf assumes that the counters that support PEBS are contiguous. But it's not guaranteed with the new leaf 0x23 introduced. The counters are enumerated with a counter mask. There may be holes in the counter mask for future platforms or in a virtualization environment. Store the PEBS event mask rather than the maximum number of PEBS counters in the x86 PMU structures. Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Andi Kleen <[email protected]> Reviewed-by: Ian Rogers <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 2657986 commit a23eb2f

File tree

4 files changed

+26
-13
lines changed

4 files changed

+26
-13
lines changed

arch/x86/events/intel/core.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -4728,7 +4728,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
47284728
{
47294729
intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
47304730
&pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1);
4731-
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
4731+
pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
47324732
pmu->unconstrained = (struct event_constraint)
47334733
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
47344734
0, pmu->num_counters, 0, 0);
@@ -6070,7 +6070,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
60706070

60716071
pmu->num_counters = x86_pmu.num_counters;
60726072
pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6073-
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
6073+
pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
60746074
pmu->unconstrained = (struct event_constraint)
60756075
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
60766076
0, pmu->num_counters, 0, 0);
@@ -6193,7 +6193,7 @@ __init int intel_pmu_init(void)
61936193
x86_pmu.events_maskl = ebx.full;
61946194
x86_pmu.events_mask_len = eax.split.mask_length;
61956195

6196-
x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
6196+
x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(x86_pmu.num_counters - 1, 0));
61976197
x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
61986198

61996199
/*
@@ -6822,7 +6822,7 @@ __init int intel_pmu_init(void)
68226822
pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
68236823
}
68246824

6825-
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
6825+
pmu->pebs_events_mask = intel_pmu_pebs_mask(GENMASK_ULL(pmu->num_counters - 1, 0));
68266826
pmu->unconstrained = (struct event_constraint)
68276827
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
68286828
0, pmu->num_counters, 0, 0);

arch/x86/events/intel/ds.c

Lines changed: 8 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1137,7 +1137,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sche
11371137
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
11381138
{
11391139
struct debug_store *ds = cpuc->ds;
1140-
int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
1140+
int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu);
11411141
int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
11421142
u64 threshold;
11431143
int reserved;
@@ -2157,6 +2157,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
21572157
void *base, *at, *top;
21582158
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
21592159
short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2160+
int max_pebs_events = intel_pmu_max_num_pebs(NULL);
21602161
int bit, i, size;
21612162
u64 mask;
21622163

@@ -2168,8 +2169,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
21682169

21692170
ds->pebs_index = ds->pebs_buffer_base;
21702171

2171-
mask = (1ULL << x86_pmu.max_pebs_events) - 1;
2172-
size = x86_pmu.max_pebs_events;
2172+
mask = x86_pmu.pebs_events_mask;
2173+
size = max_pebs_events;
21732174
if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
21742175
mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
21752176
size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
@@ -2208,8 +2209,9 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
22082209
pebs_status = p->status = cpuc->pebs_enabled;
22092210

22102211
bit = find_first_bit((unsigned long *)&pebs_status,
2211-
x86_pmu.max_pebs_events);
2212-
if (bit >= x86_pmu.max_pebs_events)
2212+
max_pebs_events);
2213+
2214+
if (!(x86_pmu.pebs_events_mask & (1 << bit)))
22132215
continue;
22142216

22152217
/*
@@ -2267,7 +2269,6 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
22672269
{
22682270
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
22692271
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2270-
int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
22712272
int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
22722273
struct debug_store *ds = cpuc->ds;
22732274
struct perf_event *event;
@@ -2283,7 +2284,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
22832284

22842285
ds->pebs_index = ds->pebs_buffer_base;
22852286

2286-
mask = ((1ULL << max_pebs_events) - 1) |
2287+
mask = hybrid(cpuc->pmu, pebs_events_mask) |
22872288
(((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
22882289
size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
22892290

arch/x86/events/perf_event.h

Lines changed: 13 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -684,7 +684,7 @@ struct x86_hybrid_pmu {
684684
cpumask_t supported_cpus;
685685
union perf_capabilities intel_cap;
686686
u64 intel_ctrl;
687-
int max_pebs_events;
687+
u64 pebs_events_mask;
688688
int num_counters;
689689
int num_counters_fixed;
690690
struct event_constraint unconstrained;
@@ -852,7 +852,7 @@ struct x86_pmu {
852852
pebs_ept :1;
853853
int pebs_record_size;
854854
int pebs_buffer_size;
855-
int max_pebs_events;
855+
u64 pebs_events_mask;
856856
void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
857857
struct event_constraint *pebs_constraints;
858858
void (*pebs_aliases)(struct perf_event *event);
@@ -1661,6 +1661,17 @@ static inline int is_ht_workaround_enabled(void)
16611661
return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
16621662
}
16631663

1664+
static inline u64 intel_pmu_pebs_mask(u64 cntr_mask)
1665+
{
1666+
return MAX_PEBS_EVENTS_MASK & cntr_mask;
1667+
}
1668+
1669+
static inline int intel_pmu_max_num_pebs(struct pmu *pmu)
1670+
{
1671+
static_assert(MAX_PEBS_EVENTS == 32);
1672+
return fls((u32)hybrid(pmu, pebs_events_mask));
1673+
}
1674+
16641675
#else /* CONFIG_CPU_SUP_INTEL */
16651676

16661677
static inline void reserve_ds_buffers(void)

arch/x86/include/asm/intel_ds.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -9,6 +9,7 @@
99
/* The maximal number of PEBS events: */
1010
#define MAX_PEBS_EVENTS_FMT4 8
1111
#define MAX_PEBS_EVENTS 32
12+
#define MAX_PEBS_EVENTS_MASK GENMASK_ULL(MAX_PEBS_EVENTS - 1, 0)
1213
#define MAX_FIXED_PEBS_EVENTS 16
1314

1415
/*

0 commit comments

Comments (0)