@@ -209,10 +209,14 @@ static int amd_uncore_event_init(struct perf_event *event)
 {
         struct amd_uncore *uncore;
         struct hw_perf_event *hwc = &event->hw;
+        u64 event_mask = AMD64_RAW_EVENT_MASK_NB;
 
         if (event->attr.type != event->pmu->type)
                 return -ENOENT;
 
+        if (pmu_version >= 2 && is_nb_event(event))
+                event_mask = AMD64_PERFMON_V2_RAW_EVENT_MASK_NB;
+
         /*
          * NB and Last level cache counters (MSRs) are shared across all cores
          * that share the same NB / Last level cache. On family 16h and below,
@@ -221,7 +225,7 @@ static int amd_uncore_event_init(struct perf_event *event)
          * out. So we do not support sampling and per-thread events via
          * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
          */
-        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+        hwc->config = event->attr.config & event_mask;
         hwc->idx = -1;
 
         if (event->cpu < 0)
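On PerfMonV2 parts, the Data Fabric event select widens to bits 0-7 plus 32-37 and the unit mask to bits 8-15 plus 24-27, so masking the raw config with AMD64_RAW_EVENT_MASK_NB would silently drop the new bits. The wider mask, along with the pmu_version variable and the is_nb_event() helper, comes from companion patches in this series rather than from this hunk; a sketch of how the mask is presumably defined in arch/x86/include/asm/perf_event.h, matching the event14v2/umask12 bit ranges added below:

        /* Sketch: event select covers bits 0-7 and 32-37, unit mask covers
         * bits 8-15 and 24-27 on PerfMonV2 Data Fabric counters.
         */
        #define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB      \
                (AMD64_EVENTSEL_EVENT | GENMASK_ULL(37, 36))
        #define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB      \
                (ARCH_PERFMON_EVENTSEL_UMASK | GENMASK_ULL(27, 24))
        #define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB      \
                (AMD64_PERFMON_V2_EVENTSEL_EVENT_NB |   \
                 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)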
@@ -300,8 +304,10 @@ static struct device_attribute format_attr_##_var = \
 
 DEFINE_UNCORE_FORMAT_ATTR(event12,     event,      "config:0-7,32-35");
 DEFINE_UNCORE_FORMAT_ATTR(event14,     event,      "config:0-7,32-35,59-60"); /* F17h+ DF */
+DEFINE_UNCORE_FORMAT_ATTR(event14v2,   event,      "config:0-7,32-37");       /* PerfMonV2 DF */
 DEFINE_UNCORE_FORMAT_ATTR(event8,      event,      "config:0-7");             /* F17h+ L3 */
-DEFINE_UNCORE_FORMAT_ATTR(umask,       umask,      "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(umask8,      umask,      "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(umask12,     umask,      "config:8-15,24-27");      /* PerfMonV2 DF */
 DEFINE_UNCORE_FORMAT_ATTR(coreid,      coreid,     "config:42-44");           /* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(slicemask,   slicemask,  "config:48-51");           /* F17h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(threadmask8, threadmask, "config:56-63");           /* F17h L3 */
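These format strings are what perf tooling reads back from sysfs (e.g. /sys/bus/event_source/devices/amd_df/format/) to place the event and umask fields into perf_event_attr::config. As an illustration of the new event14v2/umask12 layout only, and not code from this patch, a hypothetical helper that packs a 14-bit event select and a 12-bit unit mask accordingly:

        #include <linux/types.h>

        /* Hypothetical, for illustration: pack a PerfMonV2 DF event per the
         * event14v2 ("config:0-7,32-37") and umask12 ("config:8-15,24-27")
         * formats declared above.
         */
        static inline u64 amd_df_v2_raw_config(u64 event, u64 umask)
        {
                return (event & 0xffULL)           |  /* event[7:0]  -> config[7:0]   */
                       ((event & 0x3f00ULL) << 24) |  /* event[13:8] -> config[37:32] */
                       ((umask & 0xffULL) << 8)    |  /* umask[7:0]  -> config[15:8]  */
                       ((umask & 0xf00ULL) << 16);    /* umask[11:8] -> config[27:24] */
        }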
@@ -313,14 +319,14 @@ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */
 /* Common DF and NB attributes */
 static struct attribute *amd_uncore_df_format_attr[] = {
         &format_attr_event12.attr, /* event */
-        &format_attr_umask.attr,   /* umask */
+        &format_attr_umask8.attr,  /* umask */
         NULL,
 };
 
 /* Common L2 and L3 attributes */
 static struct attribute *amd_uncore_l3_format_attr[] = {
         &format_attr_event12.attr, /* event */
-        &format_attr_umask.attr,   /* umask */
+        &format_attr_umask8.attr,  /* umask */
         NULL,                      /* threadmask */
         NULL,
 };
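These arrays hold the lowest-common-denominator formats; amd_uncore_init() below overwrites or appends the family- and version-specific entries, with the NULL slots acting as placeholders. For context only, and reconstructed rather than taken from this patch, the arrays are presumably wired into sysfs through format attribute groups along these lines:

        static struct attribute_group amd_uncore_df_format_group = {
                .name  = "format",              /* appears as .../amd_df/format */
                .attrs = amd_uncore_df_format_attr,
        };

        static struct attribute_group amd_uncore_l3_format_group = {
                .name  = "format",              /* appears as .../amd_l3/format */
                .attrs = amd_uncore_l3_format_attr,
        };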
@@ -659,8 +665,12 @@ static int __init amd_uncore_init(void)
         }
 
         if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
-                if (boot_cpu_data.x86 >= 0x17)
+                if (pmu_version >= 2) {
+                        *df_attr++ = &format_attr_event14v2.attr;
+                        *df_attr++ = &format_attr_umask12.attr;
+                } else if (boot_cpu_data.x86 >= 0x17) {
                         *df_attr = &format_attr_event14.attr;
+                }
 
                 amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                 if (!amd_uncore_nb) {
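With PerfMonV2, both the wider event select and the wider unit mask are advertised for the DF PMU; pre-V2 Family 17h+ parts keep event14 and the common umask8 entry from the array above. The pmu_version check relies on the variable being set earlier in amd_uncore_init() by another patch in the series; the assumed initialization, shown only for context, would look roughly like:

        /* Assumed to run near the top of amd_uncore_init() (not part of this
         * hunk): PerfMonV2 support is indicated by a CPUID feature bit.
         */
        if (boot_cpu_has(X86_FEATURE_PERFMON_V2))
                pmu_version = 2;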
@@ -686,11 +696,11 @@ static int __init amd_uncore_init(void)
         if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
                 if (boot_cpu_data.x86 >= 0x19) {
                         *l3_attr++ = &format_attr_event8.attr;
-                        *l3_attr++ = &format_attr_umask.attr;
+                        *l3_attr++ = &format_attr_umask8.attr;
                         *l3_attr++ = &format_attr_threadmask2.attr;
                 } else if (boot_cpu_data.x86 >= 0x17) {
                         *l3_attr++ = &format_attr_event8.attr;
-                        *l3_attr++ = &format_attr_umask.attr;
+                        *l3_attr++ = &format_attr_umask8.attr;
                         *l3_attr++ = &format_attr_threadmask8.attr;
                 }
 
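The L3 side only picks up the umask-to-umask8 rename; its unit mask stays 8 bits wide on both Family 17h and Family 19h. As a usage illustration (not part of the patch), a user-space sketch of opening a raw amd_df event with a config packed per the PerfMonV2 layout; the PMU type value would normally be read from /sys/bus/event_source/devices/amd_df/type:

        #include <linux/perf_event.h>
        #include <sys/syscall.h>
        #include <string.h>
        #include <unistd.h>

        /* Open a system-wide DF counter on one CPU; uncore events are counted
         * per CPU/domain rather than per task, hence pid = -1 and an explicit cpu.
         */
        static int open_df_event(int amd_df_pmu_type, __u64 config, int cpu)
        {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.size   = sizeof(attr);
                attr.type   = amd_df_pmu_type;  /* dynamic type of the amd_df PMU */
                attr.config = config;           /* event/umask packed as above */

                return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
        }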