
Commit e8fb5d6

Kan Liang authored and Peter Zijlstra committed
perf/x86: Add config_mask to represent EVENTSEL bitmask
Different vendors may support different fields in EVENTSEL MSR, such as Intel would introduce new fields umask2 and eq bits in EVENTSEL MSR since Perfmon version 6. However, a fixed mask X86_RAW_EVENT_MASK is used to filter the attr.config. Introduce a new config_mask to record the real supported EVENTSEL bitmask. Only apply it to the existing code now. No functional change. Co-developed-by: Dapeng Mi <[email protected]> Signed-off-by: Dapeng Mi <[email protected]> Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Andi Kleen <[email protected]> Reviewed-by: Ian Rogers <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 608f697 commit e8fb5d6
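
To see the effect of the new mask outside the kernel, here is a minimal
user-space C sketch of the filtering this patch centralizes. The mask
values, the helper name get_event_config(), and the sample config are
illustrative stand-ins, not the kernel's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative masks only: a legacy PMU supports just the classic
     * EVENTSEL fields, while a newer PMU could advertise extra high
     * bits through its config_mask. */
    #define LEGACY_EVENT_MASK    0x00000000ff8fffffULL
    #define EXTENDED_EVENT_MASK  0x000000ffff8fffffULL

    /* Same shape as x86_pmu_get_event_config(): attr.config is ANDed
     * with the per-PMU supported-bits mask before reaching hw.config. */
    static uint64_t get_event_config(uint64_t attr_config, uint64_t config_mask)
    {
        return attr_config & config_mask;
    }

    int main(void)
    {
        /* A raw event with 0xaa in bits 39:32, classic event/umask below. */
        uint64_t attr_config = 0xaa0000c153ULL;

        /* The legacy mask silently drops the unsupported high bits... */
        printf("legacy:   %#llx\n",
               (unsigned long long)get_event_config(attr_config, LEGACY_EVENT_MASK));
        /* ...while a wider config_mask keeps them. */
        printf("extended: %#llx\n",
               (unsigned long long)get_event_config(attr_config, EXTENDED_EVENT_MASK));
        return 0;
    }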

3 files changed: 12 additions, 1 deletion

arch/x86/events/core.c

Lines changed: 4 additions & 1 deletion
@@ -624,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
 	if (event->attr.type == event->pmu->type)
-		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+		event->hw.config |= x86_pmu_get_event_config(event);
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
 		s64 left = event->attr.sample_period;
@@ -2098,6 +2098,9 @@ static int __init init_hw_perf_events(void)
 	if (!x86_pmu.intel_ctrl)
 		x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
 
+	if (!x86_pmu.config_mask)
+		x86_pmu.config_mask = X86_RAW_EVENT_MASK;
+
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 

arch/x86/events/intel/core.c

Lines changed: 1 addition & 0 deletions
@@ -6144,6 +6144,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+		pmu->config_mask = X86_RAW_EVENT_MASK;
 		pmu->unconstrained = (struct event_constraint)
 			__EVENT_CONSTRAINT(0, pmu->cntr_mask64,
 					   0, x86_pmu_num_counters(&pmu->pmu), 0, 0);

arch/x86/events/perf_event.h

Lines changed: 7 additions & 0 deletions
@@ -695,6 +695,7 @@ struct x86_hybrid_pmu {
 	union perf_capabilities		intel_cap;
 	u64				intel_ctrl;
 	u64				pebs_events_mask;
+	u64				config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -790,6 +791,7 @@ struct x86_pmu {
 	int		(*rdpmc_index)(int index);
 	u64		(*event_map)(int);
 	int		max_events;
+	u64		config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -1241,6 +1243,11 @@ static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
 	return fls64(hybrid(pmu, fixed_cntr_mask64));
 }
 
+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+	return event->attr.config & hybrid(event->pmu, config_mask);
+}
+
 extern struct event_constraint emptyconstraint;
 
 extern struct event_constraint unconstrained;
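
For reference, the new helper leans on the existing hybrid() accessor,
which resolves a field from either the global x86_pmu or, on hybrid
systems, the per-core-type x86_hybrid_pmu that embeds the struct pmu.
A simplified, self-contained sketch of that dispatch pattern (the
struct names and helper below are illustrative; the kernel's hybrid()
macro uses is_hybrid() and container_of()):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for struct pmu / struct x86_hybrid_pmu. */
    struct pmu_stub { int type; };

    struct hybrid_pmu_stub {
        struct pmu_stub pmu;      /* embedded, as in struct x86_hybrid_pmu */
        uint64_t        config_mask;
    };

    static uint64_t global_config_mask = 0xff8fffffULL; /* plays x86_pmu.config_mask */
    static int      is_hybrid;                          /* plays is_hybrid() */

    /* Same dispatch shape as hybrid(pmu, config_mask): prefer the
     * per-PMU field on hybrid systems, else fall back to the global. */
    static uint64_t resolve_config_mask(struct pmu_stub *pmu)
    {
        if (is_hybrid) {
            /* Hand-rolled container_of(): recover the enclosing struct. */
            struct hybrid_pmu_stub *h = (struct hybrid_pmu_stub *)
                ((char *)pmu - offsetof(struct hybrid_pmu_stub, pmu));
            return h->config_mask;
        }
        return global_config_mask;
    }

    int main(void)
    {
        struct hybrid_pmu_stub e_core = { .config_mask = 0xffff8fffffULL };

        is_hybrid = 1;
        /* An event's attr.config would be ANDed with this resolved mask. */
        return resolve_config_mask(&e_core.pmu) == 0xffff8fffffULL ? 0 : 1;
    }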
