 */

#define _GNU_SOURCE /* for program_invocation_short_name */
- #include "test_util.h"
+
#include "kvm_util.h"
+ #include "pmu.h"
#include "processor.h"
-
- /*
-  * In lieu of copying perf_event.h into tools...
-  */
- #define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
- #define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)
-
- /* End of stuff taken from perf_event.h. */
-
- /* Oddly, this isn't in perf_event.h. */
- #define ARCH_PERFMON_BRANCHES_RETIRED	5
+ #include "test_util.h"

#define NUM_BRANCHES	42
- #define INTEL_PMC_IDX_FIXED	32
-
- /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
- #define MAX_FILTER_EVENTS	300
#define MAX_TEST_EVENTS	10

#define PMU_EVENT_FILTER_INVALID_ACTION		(KVM_PMU_EVENT_DENY + 1)
#define PMU_EVENT_FILTER_INVALID_FLAGS		(KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
- #define PMU_EVENT_FILTER_INVALID_NEVENTS	(MAX_FILTER_EVENTS + 1)
-
- /*
-  * This is how the event selector and unit mask are stored in an AMD
-  * core performance event-select register. Intel's format is similar,
-  * but the event selector is only 8 bits.
-  */
- #define EVENT(select, umask) ((select & 0xf00UL) << 24 | (select & 0xff) | \
- 			      (umask & 0xff) << 8)
-
- /*
-  * "Branch instructions retired", from the Intel SDM, volume 3,
-  * "Pre-defined Architectural Performance Events."
-  */
-
- #define INTEL_BR_RETIRED	EVENT(0xc4, 0)
-
- /*
-  * "Retired branch instructions", from Processor Programming Reference
-  * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
-  * Preliminary Processor Programming Reference (PPR) for AMD Family
-  * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
-  * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
-  * B1 Processors Volume 1 of 2.
-  */
-
- #define AMD_ZEN_BR_RETIRED	EVENT(0xc2, 0)
-
-
- /*
-  * "Retired instructions", from Processor Programming Reference
-  * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
-  * Preliminary Processor Programming Reference (PPR) for AMD Family
-  * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
-  * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
-  * B1 Processors Volume 1 of 2.
-  * --- and ---
-  * "Instructions retired", from the Intel SDM, volume 3,
-  * "Pre-defined Architectural Performance Events."
-  */
-
- #define INST_RETIRED	EVENT(0xc0, 0)
+ #define PMU_EVENT_FILTER_INVALID_NEVENTS	(KVM_PMU_EVENT_FILTER_MAX_EVENTS + 1)

struct __kvm_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 pad[4];
- 	__u64 events[MAX_FILTER_EVENTS];
+ 	__u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
};

/*
- * This event list comprises Intel's eight architectural events plus
- * AMD's "retired branch instructions" for Zen[123] (and possibly
- * other AMD CPUs).
+ * This event list comprises Intel's known architectural events, plus AMD's
+ * "retired branch instructions" for Zen1-Zen3 (and possibly other AMD CPUs).
+ * Note, AMD and Intel use the same encoding for instructions retired.
 */
+ kvm_static_assert(INTEL_ARCH_INSTRUCTIONS_RETIRED == AMD_ZEN_INSTRUCTIONS_RETIRED);
+
static const struct __kvm_pmu_event_filter base_event_filter = {
	.nevents = ARRAY_SIZE(base_event_filter.events),
	.events = {
- 		EVENT(0x3c, 0),
- 		INST_RETIRED,
- 		EVENT(0x3c, 1),
- 		EVENT(0x2e, 0x4f),
- 		EVENT(0x2e, 0x41),
- 		EVENT(0xc4, 0),
- 		EVENT(0xc5, 0),
- 		EVENT(0xa4, 1),
- 		AMD_ZEN_BR_RETIRED,
+ 		INTEL_ARCH_CPU_CYCLES,
+ 		INTEL_ARCH_INSTRUCTIONS_RETIRED,
+ 		INTEL_ARCH_REFERENCE_CYCLES,
+ 		INTEL_ARCH_LLC_REFERENCES,
+ 		INTEL_ARCH_LLC_MISSES,
+ 		INTEL_ARCH_BRANCHES_RETIRED,
+ 		INTEL_ARCH_BRANCHES_MISPREDICTED,
+ 		INTEL_ARCH_TOPDOWN_SLOTS,
+ 		AMD_ZEN_BRANCHES_RETIRED,
	},
};

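Note on the encoding: the deleted EVENT() macro packed the event select and unit mask into the layout of an AMD core performance event-select register (Intel uses the same low bits, with only an 8-bit event select). Below is a minimal standalone sketch of that packing, assuming pmu.h's RAW_EVENT() and the INTEL_ARCH_*/AMD_ZEN_* encodings follow the same layout as the deleted macro.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the deleted EVENT() packing: bits 35:32 take the high nibble of
 * the event select, bits 15:8 the unit mask, bits 7:0 the low byte of the
 * event select. RAW_EVENT() in pmu.h is assumed to use the same layout.
 */
static uint64_t raw_event(uint64_t select, uint64_t umask)
{
	return ((select & 0xf00ULL) << 24) | ((umask & 0xff) << 8) | (select & 0xff);
}

int main(void)
{
	/* "Branch instructions retired": architectural event 0xc4, unit mask 0. */
	printf("0x%llx\n", (unsigned long long)raw_event(0xc4, 0));
	return 0;
}

With that layout, raw_event(0xc4, 0) yields 0xc4, the same value the old INTEL_BR_RETIRED produced.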
@@ -165,9 +113,9 @@ static void intel_guest_code(void)
	for (;;) {
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
- 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
+ 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_BRANCHES_RETIRED);
		wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
- 		      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+ 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_INSTRUCTIONS_RETIRED);
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);

		run_and_measure_loop(MSR_IA32_PMC0);
@@ -189,9 +137,9 @@ static void amd_guest_code(void)
	for (;;) {
		wrmsr(MSR_K7_EVNTSEL0, 0);
		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
- 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
+ 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BRANCHES_RETIRED);
		wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
- 		      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+ 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_INSTRUCTIONS_RETIRED);

		run_and_measure_loop(MSR_K7_PERFCTR0);
		GUEST_SYNC(0);
@@ -312,7 +260,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu)
		.action = KVM_PMU_EVENT_DENY,
		.nevents = 1,
		.events = {
- 			EVENT(0x1C2, 0),
+ 			RAW_EVENT(0x1C2, 0),
		},
	};

@@ -347,9 +295,9 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu)

	f.action = KVM_PMU_EVENT_DENY;

- 	remove_event(&f, INST_RETIRED);
- 	remove_event(&f, INTEL_BR_RETIRED);
- 	remove_event(&f, AMD_ZEN_BR_RETIRED);
+ 	remove_event(&f, INTEL_ARCH_INSTRUCTIONS_RETIRED);
+ 	remove_event(&f, INTEL_ARCH_BRANCHES_RETIRED);
+ 	remove_event(&f, AMD_ZEN_BRANCHES_RETIRED);
	test_with_filter(vcpu, &f);

	ASSERT_PMC_COUNTING_INSTRUCTIONS();
@@ -361,9 +309,9 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu)

	f.action = KVM_PMU_EVENT_ALLOW;

- 	remove_event(&f, INST_RETIRED);
- 	remove_event(&f, INTEL_BR_RETIRED);
- 	remove_event(&f, AMD_ZEN_BR_RETIRED);
+ 	remove_event(&f, INTEL_ARCH_INSTRUCTIONS_RETIRED);
+ 	remove_event(&f, INTEL_ARCH_BRANCHES_RETIRED);
+ 	remove_event(&f, AMD_ZEN_BRANCHES_RETIRED);
	test_with_filter(vcpu, &f);

	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
@@ -452,9 +400,9 @@ static bool use_amd_pmu(void)
 * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake.
 */
#define MEM_INST_RETIRED		0xD0
- #define MEM_INST_RETIRED_LOAD		EVENT(MEM_INST_RETIRED, 0x81)
- #define MEM_INST_RETIRED_STORE		EVENT(MEM_INST_RETIRED, 0x82)
- #define MEM_INST_RETIRED_LOAD_STORE	EVENT(MEM_INST_RETIRED, 0x83)
+ #define MEM_INST_RETIRED_LOAD		RAW_EVENT(MEM_INST_RETIRED, 0x81)
+ #define MEM_INST_RETIRED_STORE		RAW_EVENT(MEM_INST_RETIRED, 0x82)
+ #define MEM_INST_RETIRED_LOAD_STORE	RAW_EVENT(MEM_INST_RETIRED, 0x83)

static bool supports_event_mem_inst_retired(void)
{
@@ -486,9 +434,9 @@ static bool supports_event_mem_inst_retired(void)
 * B1 Processors Volume 1 of 2.
 */
#define LS_DISPATCH		0x29
- #define LS_DISPATCH_LOAD	EVENT(LS_DISPATCH, BIT(0))
- #define LS_DISPATCH_STORE	EVENT(LS_DISPATCH, BIT(1))
- #define LS_DISPATCH_LOAD_STORE	EVENT(LS_DISPATCH, BIT(2))
+ #define LS_DISPATCH_LOAD	RAW_EVENT(LS_DISPATCH, BIT(0))
+ #define LS_DISPATCH_STORE	RAW_EVENT(LS_DISPATCH, BIT(1))
+ #define LS_DISPATCH_LOAD_STORE	RAW_EVENT(LS_DISPATCH, BIT(2))

#define INCLUDE_MASKED_ENTRY(event_select, mask, match) \
	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false)
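The INCLUDE_MASKED_ENTRY() wrapper builds masked filter entries via KVM_PMU_ENCODE_MASKED_ENTRY(). As a reading aid only (not the kernel's implementation), here is a sketch of the documented matching rule, which holds that a guest event matches an entry when the event selects agree and (guest unit mask & entry mask) == entry match, using the MEM_INST_RETIRED unit masks defined above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the documented masked-event matching rule. */
static bool masked_entry_matches(uint8_t guest_umask, uint8_t mask, uint8_t match)
{
	return (guest_umask & mask) == match;
}

int main(void)
{
	/*
	 * With mask = 0xff and match = 0x81, only MEM_INST_RETIRED_LOAD
	 * (umask 0x81) matches; store (0x82) and load+store (0x83) do not.
	 */
	printf("load:       %d\n", masked_entry_matches(0x81, 0xff, 0x81));
	printf("store:      %d\n", masked_entry_matches(0x82, 0xff, 0x81));
	printf("load+store: %d\n", masked_entry_matches(0x83, 0xff, 0x81));
	return 0;
}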
@@ -729,14 +677,14 @@ static void add_dummy_events(uint64_t *events, int nevents)

static void test_masked_events(struct kvm_vcpu *vcpu)
{
- 	int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS;
- 	uint64_t events[MAX_FILTER_EVENTS];
+ 	int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS;
+ 	uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];

	/* Run the test cases against a sparse PMU event filter. */
	run_masked_events_tests(vcpu, events, 0);

	/* Run the test cases against a dense PMU event filter. */
- 	add_dummy_events(events, MAX_FILTER_EVENTS);
+ 	add_dummy_events(events, KVM_PMU_EVENT_FILTER_MAX_EVENTS);
	run_masked_events_tests(vcpu, events, nevents);
}

@@ -809,20 +757,19 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
	TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}

- static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
+ static void intel_run_fixed_counter_guest_code(uint8_t idx)
{
	for (;;) {
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- 		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
+ 		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + idx, 0);

		/* Only OS_EN bit is enabled for fixed counter[idx]. */
- 		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
- 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
- 		      BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
+ 		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(idx, FIXED_PMC_KERNEL));
+ 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(idx));
		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

- 		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
+ 		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + idx));
	}
}

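FIXED_PMC_CTRL() and FIXED_PMC_GLOBAL_CTRL_ENABLE() come from the new pmu.h and replace the open-coded bit math above. The sketch below uses hypothetical SKETCH_* names whose expansions are inferred purely from the removed lines (a 4-bit control field per fixed counter whose low bit is the OS/kernel enable, and a global-enable bit at position 32 + idx); the real macro definitions in pmu.h may differ in detail.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical expansions, inferred from the open-coded bit math they
 * replace; the actual macros live in the selftests' pmu.h.
 */
#define SKETCH_FIXED_PMC_KERNEL			(1ULL << 0)	/* OS_EN */
#define SKETCH_FIXED_PMC_CTRL(idx, val)		((uint64_t)(val) << (4 * (idx)))
#define SKETCH_FIXED_PMC_GLOBAL_CTRL_ENABLE(idx)	(1ULL << (32 + (idx)))

int main(void)
{
	/* Fixed counter 1: OS_EN lands at FIXED_CTR_CTRL bit 4, enable at GLOBAL_CTRL bit 33. */
	printf("FIXED_CTR_CTRL = 0x%llx\n",
	       (unsigned long long)SKETCH_FIXED_PMC_CTRL(1, SKETCH_FIXED_PMC_KERNEL));
	printf("GLOBAL_CTRL    = 0x%llx\n",
	       (unsigned long long)SKETCH_FIXED_PMC_GLOBAL_CTRL_ENABLE(1));
	return 0;
}

For idx = 1 this prints 0x10 and 0x200000000, the same values the removed BIT_ULL(4 * fixed_ctr_idx) and BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx) expressions produced.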