
Commit de527b1

Jinrong Liang authored and sean-jc committed
KVM: selftests: Introduce "struct __kvm_pmu_event_filter" to manipulate filter
Add a custom "struct __kvm_pmu_event_filter" to improve the PMU event
filter tests. Organizing the filter parameters in a single fixed-size
structure simplifies filter setup and avoids dynamically allocating the
events array.

Alternatively, selftests could use a struct overlay ala vcpu_set_msr()
to avoid dynamically allocating the array:

	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;

but the extra layer added by the nested structs is counterproductive to
writing efficient, clean code.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Jinrong Liang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
[sean: massage changelog to explain alternative]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent c853be2 commit de527b1
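
To make the change concrete: the old helpers heap-allocated a filter with a
flexible array member that every caller had to free, while the new struct lets
tests build filters on the stack with designated initializers. A before/after
sketch, mirroring test_amd_deny_list() from the diff below (illustrative
excerpt only, not additional code from the commit):

	/* Before: heap-allocated flexible array, caller must free. */
	uint64_t event = EVENT(0x1C2, 0);
	struct kvm_pmu_event_filter *f;

	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
	test_with_filter(vcpu, f);
	free(f);

	/* After: fixed-size struct on the stack, no cleanup needed. */
	struct __kvm_pmu_event_filter f = {
		.action = KVM_PMU_EVENT_DENY,
		.nevents = 1,
		.events = {
			EVENT(0x1C2, 0),
		},
	};

	test_with_filter(vcpu, &f);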

1 file changed (+90, -92 lines)


tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

@@ -28,6 +28,10 @@
 
 #define NUM_BRANCHES 42
 
+/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
+#define MAX_FILTER_EVENTS 300
+#define MAX_TEST_EVENTS 10
+
 /*
  * This is how the event selector and unit mask are stored in an AMD
  * core performance event-select register. Intel's format is similar,
@@ -69,21 +73,33 @@
 
 #define INST_RETIRED EVENT(0xc0, 0)
 
+struct __kvm_pmu_event_filter {
+	__u32 action;
+	__u32 nevents;
+	__u32 fixed_counter_bitmap;
+	__u32 flags;
+	__u32 pad[4];
+	__u64 events[MAX_FILTER_EVENTS];
+};
+
 /*
  * This event list comprises Intel's eight architectural events plus
  * AMD's "retired branch instructions" for Zen[123] (and possibly
  * other AMD CPUs).
  */
-static const uint64_t event_list[] = {
-	EVENT(0x3c, 0),
-	INST_RETIRED,
-	EVENT(0x3c, 1),
-	EVENT(0x2e, 0x4f),
-	EVENT(0x2e, 0x41),
-	EVENT(0xc4, 0),
-	EVENT(0xc5, 0),
-	EVENT(0xa4, 1),
-	AMD_ZEN_BR_RETIRED,
+static const struct __kvm_pmu_event_filter base_event_filter = {
+	.nevents = ARRAY_SIZE(base_event_filter.events),
+	.events = {
+		EVENT(0x3c, 0),
+		INST_RETIRED,
+		EVENT(0x3c, 1),
+		EVENT(0x2e, 0x4f),
+		EVENT(0x2e, 0x41),
+		EVENT(0xc4, 0),
+		EVENT(0xc5, 0),
+		EVENT(0xa4, 1),
+		AMD_ZEN_BR_RETIRED,
+	},
 };
 
 struct {
@@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
 	return !r;
 }
 
-static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-{
-	struct kvm_pmu_event_filter *f;
-	int size = sizeof(*f) + nevents * sizeof(f->events[0]);
-
-	f = malloc(size);
-	TEST_ASSERT(f, "Out of memory");
-	memset(f, 0, size);
-	f->nevents = nevents;
-	return f;
-}
-
-
-static struct kvm_pmu_event_filter *
-create_pmu_event_filter(const uint64_t event_list[], int nevents,
-			uint32_t action, uint32_t flags)
-{
-	struct kvm_pmu_event_filter *f;
-	int i;
-
-	f = alloc_pmu_event_filter(nevents);
-	f->action = action;
-	f->flags = flags;
-	for (i = 0; i < nevents; i++)
-		f->events[i] = event_list[i];
-
-	return f;
-}
-
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
-{
-	return create_pmu_event_filter(event_list,
-				       ARRAY_SIZE(event_list),
-				       action, 0);
-}
-
 /*
  * Remove the first occurrence of 'event' (if any) from the filter's
  * event list.
  */
-static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
 {
 	bool found = false;
 	int i;
@@ -313,66 +293,73 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
 }
 
 static void test_with_filter(struct kvm_vcpu *vcpu,
-			     struct kvm_pmu_event_filter *f)
+			     struct __kvm_pmu_event_filter *__f)
 {
+	struct kvm_pmu_event_filter *f = (void *)__f;
+
 	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
 	run_vcpu_and_sync_pmc_results(vcpu);
 }
 
 static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 {
-	uint64_t event = EVENT(0x1C2, 0);
-	struct kvm_pmu_event_filter *f;
+	struct __kvm_pmu_event_filter f = {
+		.action = KVM_PMU_EVENT_DENY,
+		.nevents = 1,
+		.events = {
+			EVENT(0x1C2, 0),
+		},
+	};
 
-	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
-	test_with_filter(vcpu, f);
-	free(f);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_DENY;
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_ALLOW;
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	remove_event(f, INST_RETIRED);
-	remove_event(f, INTEL_BR_RETIRED);
-	remove_event(f, AMD_ZEN_BR_RETIRED);
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_DENY;
+
+	remove_event(&f, INST_RETIRED);
+	remove_event(&f, INTEL_BR_RETIRED);
+	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+	struct __kvm_pmu_event_filter f = base_event_filter;
+
+	f.action = KVM_PMU_EVENT_ALLOW;
 
-	remove_event(f, INST_RETIRED);
-	remove_event(f, INTEL_BR_RETIRED);
-	remove_event(f, AMD_ZEN_BR_RETIRED);
-	test_with_filter(vcpu, f);
-	free(f);
+	remove_event(&f, INST_RETIRED);
+	remove_event(&f, INTEL_BR_RETIRED);
+	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
@@ -567,19 +554,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
 				   const uint64_t masked_events[],
 				   const int nmasked_events)
 {
-	struct kvm_pmu_event_filter *f;
+	struct __kvm_pmu_event_filter f = {
+		.nevents = nmasked_events,
+		.action = KVM_PMU_EVENT_ALLOW,
+		.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+	};
 
-	f = create_pmu_event_filter(masked_events, nmasked_events,
-				    KVM_PMU_EVENT_ALLOW,
-				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
-	test_with_filter(vcpu, f);
-	free(f);
+	memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+	test_with_filter(vcpu, &f);
 }
 
-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
-#define MAX_TEST_EVENTS 10
-
 #define ALLOW_LOADS		BIT(0)
 #define ALLOW_STORES		BIT(1)
 #define ALLOW_LOADS_STORES	BIT(2)
@@ -751,17 +735,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
 	run_masked_events_tests(vcpu, events, nevents);
 }
 
-static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
-			   int nevents, uint32_t flags)
+static int set_pmu_event_filter(struct kvm_vcpu *vcpu,
+				struct __kvm_pmu_event_filter *__f)
 {
-	struct kvm_pmu_event_filter *f;
-	int r;
+	struct kvm_pmu_event_filter *f = (void *)__f;
 
-	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
-	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
-	free(f);
+	return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+}
+
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
+				       uint32_t flags, uint32_t action)
+{
+	struct __kvm_pmu_event_filter f = {
+		.nevents = 1,
+		.flags = flags,
+		.action = action,
+		.events = {
+			event,
+		},
+	};
 
-	return r;
+	return set_pmu_event_filter(vcpu, &f);
 }
 
 static void test_filter_ioctl(struct kvm_vcpu *vcpu)
@@ -773,14 +767,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	 * Unfortunately having invalid bits set in event data is expected to
 	 * pass when flags == 0 (bits other than eventsel+umask).
 	 */
-	r = run_filter_test(vcpu, &e, 1, 0);
+	r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = set_pmu_single_event_filter(vcpu, e,
+					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+					KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
 
 	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = set_pmu_single_event_filter(vcpu, e,
+					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+					KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 }
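A note on the "(void *)__f" casts in test_with_filter() and
set_pmu_event_filter(): they are safe only because "struct
__kvm_pmu_event_filter" mirrors the uapi "struct kvm_pmu_event_filter" field
for field, with the fixed-size events[MAX_FILTER_EVENTS] array standing in for
the flexible array member. A compile-time guard along these lines
(hypothetical, not part of this commit) could be added to the test after the
struct definition to document that assumption:

	#include <stddef.h>	/* offsetof() */

	/*
	 * Hypothetical guard: the test-local struct must lay out its
	 * header exactly like the uapi struct, so a pointer to one can
	 * be passed to KVM_SET_PMU_EVENT_FILTER as the other.
	 */
	_Static_assert(offsetof(struct __kvm_pmu_event_filter, events) ==
		       offsetof(struct kvm_pmu_event_filter, events),
		       "__kvm_pmu_event_filter must mirror kvm_pmu_event_filter");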