Skip to content

Commit c390241

Browse files
sandip4n authored and Peter Zijlstra committed
perf/x86/amd/uncore: Add PerfMonV2 DF event format
If AMD Performance Monitoring Version 2 (PerfMonV2) is supported, use bits 0-7, 32-37 as EventSelect and bits 8-15, 24-27 as UnitMask for Data Fabric (DF) events. Signed-off-by: Sandipan Das <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lore.kernel.org/r/ffc24d5a3375b1d6e457d88e83241114de5c1942.1652954372.git.sandipan.das@amd.com
1 parent 16b48c3 commit c390241

File tree

2 files changed

+30
-7
lines changed

2 files changed

+30
-7
lines changed

arch/x86/events/amd/uncore.c

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -209,10 +209,14 @@ static int amd_uncore_event_init(struct perf_event *event)
209209
{
210210
struct amd_uncore *uncore;
211211
struct hw_perf_event *hwc = &event->hw;
212+
u64 event_mask = AMD64_RAW_EVENT_MASK_NB;
212213

213214
if (event->attr.type != event->pmu->type)
214215
return -ENOENT;
215216

217+
if (pmu_version >= 2 && is_nb_event(event))
218+
event_mask = AMD64_PERFMON_V2_RAW_EVENT_MASK_NB;
219+
216220
/*
217221
* NB and Last level cache counters (MSRs) are shared across all cores
218222
* that share the same NB / Last level cache. On family 16h and below,
@@ -221,7 +225,7 @@ static int amd_uncore_event_init(struct perf_event *event)
221225
* out. So we do not support sampling and per-thread events via
222226
* CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
223227
*/
224-
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
228+
hwc->config = event->attr.config & event_mask;
225229
hwc->idx = -1;
226230

227231
if (event->cpu < 0)
@@ -300,8 +304,10 @@ static struct device_attribute format_attr_##_var = \
300304

301305
DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35");
302306
DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */
307+
DEFINE_UNCORE_FORMAT_ATTR(event14v2, event, "config:0-7,32-37"); /* PerfMonV2 DF */
303308
DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */
304-
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
309+
DEFINE_UNCORE_FORMAT_ATTR(umask8, umask, "config:8-15");
310+
DEFINE_UNCORE_FORMAT_ATTR(umask12, umask, "config:8-15,24-27"); /* PerfMonV2 DF */
305311
DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* F19h L3 */
306312
DEFINE_UNCORE_FORMAT_ATTR(slicemask, slicemask, "config:48-51"); /* F17h L3 */
307313
DEFINE_UNCORE_FORMAT_ATTR(threadmask8, threadmask, "config:56-63"); /* F17h L3 */
@@ -313,14 +319,14 @@ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */
313319
/* Common DF and NB attributes */
314320
static struct attribute *amd_uncore_df_format_attr[] = {
315321
&format_attr_event12.attr, /* event */
316-
&format_attr_umask.attr, /* umask */
322+
&format_attr_umask8.attr, /* umask */
317323
NULL,
318324
};
319325

320326
/* Common L2 and L3 attributes */
321327
static struct attribute *amd_uncore_l3_format_attr[] = {
322328
&format_attr_event12.attr, /* event */
323-
&format_attr_umask.attr, /* umask */
329+
&format_attr_umask8.attr, /* umask */
324330
NULL, /* threadmask */
325331
NULL,
326332
};
@@ -659,8 +665,12 @@ static int __init amd_uncore_init(void)
659665
}
660666

661667
if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
662-
if (boot_cpu_data.x86 >= 0x17)
668+
if (pmu_version >= 2) {
669+
*df_attr++ = &format_attr_event14v2.attr;
670+
*df_attr++ = &format_attr_umask12.attr;
671+
} else if (boot_cpu_data.x86 >= 0x17) {
663672
*df_attr = &format_attr_event14.attr;
673+
}
664674

665675
amd_uncore_nb = alloc_percpu(struct amd_uncore *);
666676
if (!amd_uncore_nb) {
@@ -686,11 +696,11 @@ static int __init amd_uncore_init(void)
686696
if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
687697
if (boot_cpu_data.x86 >= 0x19) {
688698
*l3_attr++ = &format_attr_event8.attr;
689-
*l3_attr++ = &format_attr_umask.attr;
699+
*l3_attr++ = &format_attr_umask8.attr;
690700
*l3_attr++ = &format_attr_threadmask2.attr;
691701
} else if (boot_cpu_data.x86 >= 0x17) {
692702
*l3_attr++ = &format_attr_event8.attr;
693-
*l3_attr++ = &format_attr_umask.attr;
703+
*l3_attr++ = &format_attr_umask8.attr;
694704
*l3_attr++ = &format_attr_threadmask8.attr;
695705
}
696706

arch/x86/include/asm/perf_event.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,19 @@
8989
#define AMD64_RAW_EVENT_MASK_NB \
9090
(AMD64_EVENTSEL_EVENT | \
9191
ARCH_PERFMON_EVENTSEL_UMASK)
92+
93+
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \
94+
(AMD64_EVENTSEL_EVENT | \
95+
GENMASK_ULL(37, 36))
96+
97+
#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB \
98+
(ARCH_PERFMON_EVENTSEL_UMASK | \
99+
GENMASK_ULL(27, 24))
100+
101+
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB \
102+
(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB | \
103+
AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)
104+
92105
#define AMD64_NUM_COUNTERS 4
93106
#define AMD64_NUM_COUNTERS_CORE 6
94107
#define AMD64_NUM_COUNTERS_NB 4

0 commit comments

Comments (0)