 
 #define NUM_BRANCHES 42
 
+/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
+#define MAX_FILTER_EVENTS 300
+#define MAX_TEST_EVENTS 10
+
 /*
  * This is how the event selector and unit mask are stored in an AMD
  * core performance event-select register. Intel's format is similar,
@@ -69,21 +73,33 @@
 
 #define INST_RETIRED EVENT(0xc0, 0)
 
+struct __kvm_pmu_event_filter {
+	__u32 action;
+	__u32 nevents;
+	__u32 fixed_counter_bitmap;
+	__u32 flags;
+	__u32 pad[4];
+	__u64 events[MAX_FILTER_EVENTS];
+};
+
 /*
  * This event list comprises Intel's eight architectural events plus
  * AMD's "retired branch instructions" for Zen[123] (and possibly
  * other AMD CPUs).
  */
-static const uint64_t event_list[] = {
-	EVENT(0x3c, 0),
-	INST_RETIRED,
-	EVENT(0x3c, 1),
-	EVENT(0x2e, 0x4f),
-	EVENT(0x2e, 0x41),
-	EVENT(0xc4, 0),
-	EVENT(0xc5, 0),
-	EVENT(0xa4, 1),
-	AMD_ZEN_BR_RETIRED,
+static const struct __kvm_pmu_event_filter base_event_filter = {
+	.nevents = ARRAY_SIZE(base_event_filter.events),
+	.events = {
+		EVENT(0x3c, 0),
+		INST_RETIRED,
+		EVENT(0x3c, 1),
+		EVENT(0x2e, 0x4f),
+		EVENT(0x2e, 0x41),
+		EVENT(0xc4, 0),
+		EVENT(0xc5, 0),
+		EVENT(0xa4, 1),
+		AMD_ZEN_BR_RETIRED,
+	},
 };
 
 struct {
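For reference, the new __kvm_pmu_event_filter mirrors the uAPI struct kvm_pmu_event_filter from <linux/kvm.h>, whose events[] member is a flexible array; the only difference is the fixed-size events[MAX_FILTER_EVENTS] array, which lets the tests build filters on the stack and copy them by value. A rough sketch of the uAPI layout being shadowed (paraphrased, not copied from the header):

struct kvm_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 pad[4];
	__u64 events[];	/* flexible array, nevents entries follow */
};

Because every field up to events[] lines up, a struct __kvm_pmu_event_filter can be handed to the filter ioctl through a cast, which is exactly what the later hunks do.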
@@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
 	return !r;
 }
 
-static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-{
-	struct kvm_pmu_event_filter *f;
-	int size = sizeof(*f) + nevents * sizeof(f->events[0]);
-
-	f = malloc(size);
-	TEST_ASSERT(f, "Out of memory");
-	memset(f, 0, size);
-	f->nevents = nevents;
-	return f;
-}
-
-
-static struct kvm_pmu_event_filter *
-create_pmu_event_filter(const uint64_t event_list[], int nevents,
-			uint32_t action, uint32_t flags)
-{
-	struct kvm_pmu_event_filter *f;
-	int i;
-
-	f = alloc_pmu_event_filter(nevents);
-	f->action = action;
-	f->flags = flags;
-	for (i = 0; i < nevents; i++)
-		f->events[i] = event_list[i];
-
-	return f;
-}
-
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
-{
-	return create_pmu_event_filter(event_list,
-				       ARRAY_SIZE(event_list),
-				       action, 0);
-}
-
 /*
  * Remove the first occurrence of 'event' (if any) from the filter's
  * event list.
  */
-static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
 {
 	bool found = false;
 	int i;
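The body of remove_event() is unchanged by this patch and is cut off by the hunk context above; roughly, it slides the remaining entries down over the first match and shrinks nevents, along these lines (a sketch, not the verbatim upstream code):

	for (i = 0; i < f->nevents; i++) {
		if (found)
			f->events[i - 1] = f->events[i];	/* close the gap */
		else
			found = f->events[i] == event;
	}
	if (found)
		f->nevents--;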
@@ -313,66 +293,73 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
 }
 
 static void test_with_filter(struct kvm_vcpu *vcpu,
-			     struct kvm_pmu_event_filter *f)
+			     struct __kvm_pmu_event_filter *__f)
 {
+	struct kvm_pmu_event_filter *f = (void *)__f;
+
 	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
 	run_vcpu_and_sync_pmc_results(vcpu);
 }
 
 static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 {
-	uint64_t event = EVENT(0x1C2, 0);
-	struct kvm_pmu_event_filter *f;
+	struct __kvm_pmu_event_filter f = {
+		.action = KVM_PMU_EVENT_DENY,
+		.nevents = 1,
+		.events = {
+			EVENT(0x1C2, 0),
+		},
+	};
 
-	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
-	test_with_filter(vcpu, f);
-	free(f);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_DENY;
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_ALLOW;
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	remove_event(f, INST_RETIRED);
-	remove_event(f, INTEL_BR_RETIRED);
-	remove_event(f, AMD_ZEN_BR_RETIRED);
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_DENY;
+
+	remove_event(&f, INST_RETIRED);
+	remove_event(&f, INTEL_BR_RETIRED);
+	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+	struct __kvm_pmu_event_filter f = base_event_filter;
+
+	f.action = KVM_PMU_EVENT_ALLOW;
 
-	remove_event(f, INST_RETIRED);
-	remove_event(f, INTEL_BR_RETIRED);
-	remove_event(f, AMD_ZEN_BR_RETIRED);
-	test_with_filter(vcpu, f);
-	free(f);
+	remove_event(&f, INST_RETIRED);
+	remove_event(&f, INTEL_BR_RETIRED);
+	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
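The (void *)__f cast in test_with_filter() relies on struct __kvm_pmu_event_filter and the uAPI struct sharing the same layout up to events[]. A defensive compile-time check that could be added (not part of this patch; needs <stddef.h> for offsetof):

_Static_assert(offsetof(struct __kvm_pmu_event_filter, events) ==
	       offsetof(struct kvm_pmu_event_filter, events),
	       "__kvm_pmu_event_filter must mirror the uAPI filter layout");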
@@ -567,19 +554,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
 				    const uint64_t masked_events[],
 				    const int nmasked_events)
 {
-	struct kvm_pmu_event_filter *f;
+	struct __kvm_pmu_event_filter f = {
+		.nevents = nmasked_events,
+		.action = KVM_PMU_EVENT_ALLOW,
+		.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+	};
 
-	f = create_pmu_event_filter(masked_events, nmasked_events,
-				    KVM_PMU_EVENT_ALLOW,
-				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
-	test_with_filter(vcpu, f);
-	free(f);
+	memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+	test_with_filter(vcpu, &f);
 }
 
-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
-#define MAX_TEST_EVENTS 10
-
 #define ALLOW_LOADS BIT(0)
 #define ALLOW_STORES BIT(1)
 #define ALLOW_LOADS_STORES BIT(2)
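One note on the memcpy() above: the on-stack filter holds at most MAX_FILTER_EVENTS entries, so a bounds assertion before the copy would make overruns obvious, for example (not in the patch):

	TEST_ASSERT(nmasked_events <= MAX_FILTER_EVENTS,
		    "nmasked_events (%d) exceeds MAX_FILTER_EVENTS",
		    nmasked_events);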
@@ -751,17 +735,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
 	run_masked_events_tests(vcpu, events, nevents);
 }
 
-static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
-			   int nevents, uint32_t flags)
+static int set_pmu_event_filter(struct kvm_vcpu *vcpu,
+				struct __kvm_pmu_event_filter *__f)
 {
-	struct kvm_pmu_event_filter *f;
-	int r;
+	struct kvm_pmu_event_filter *f = (void *)__f;
 
-	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
-	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
-	free(f);
+	return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+}
+
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
+				       uint32_t flags, uint32_t action)
+{
+	struct __kvm_pmu_event_filter f = {
+		.nevents = 1,
+		.flags = flags,
+		.action = action,
+		.events = {
+			event,
+		},
+	};
 
-	return r;
+	return set_pmu_event_filter(vcpu, &f);
 }
 
 static void test_filter_ioctl(struct kvm_vcpu *vcpu)
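Unlike the old run_filter_test(), which hard-coded KVM_PMU_EVENT_ALLOW, set_pmu_single_event_filter() takes the action as a parameter, so a deny-action check could be written the same way. A hypothetical example (not part of this test):

	r = set_pmu_single_event_filter(vcpu, INST_RETIRED, 0,
					KVM_PMU_EVENT_DENY);
	TEST_ASSERT(r == 0, "Single-event deny filter should be accepted");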
@@ -773,14 +767,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	 * Unfortunately having invalid bits set in event data is expected to
 	 * pass when flags == 0 (bits other than eventsel+umask).
 	 */
-	r = run_filter_test(vcpu, &e, 1, 0);
+	r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = set_pmu_single_event_filter(vcpu, e,
+					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+					KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
 
 	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = set_pmu_single_event_filter(vcpu, e,
+					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+					KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 }
 
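For context beyond the selftest wrappers, KVM_SET_PMU_EVENT_FILTER is an ordinary VM ioctl, so plain userspace can install the same kind of filter directly. A minimal sketch, assuming an already-created VM file descriptor vm_fd and the definitions above:

	struct __kvm_pmu_event_filter f = base_event_filter;

	f.action = KVM_PMU_EVENT_ALLOW;
	if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, &f))
		perror("KVM_SET_PMU_EVENT_FILTER");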