Skip to content

Commit 3e26b82

Browse files
Jinrong Liang authored and sean-jc committed
KVM: selftests: Test Intel PMU architectural events on fixed counters
Extend the PMU counters test to validate architectural events using fixed counters. The core logic is largely the same, the biggest difference being that if a fixed counter exists, its associated event is available (the SDM doesn't explicitly state this to be true, but it's KVM's ABI and letting software program a fixed counter that doesn't actually count would be quite bizarre). Note, fixed counters rely on PERF_GLOBAL_CTRL. Reviewed-by: Jim Mattson <[email protected]> Reviewed-by: Dapeng Mi <[email protected]> Co-developed-by: Like Xu <[email protected]> Signed-off-by: Like Xu <[email protected]> Signed-off-by: Jinrong Liang <[email protected]> Co-developed-by: Sean Christopherson <[email protected]> Tested-by: Dapeng Mi <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Sean Christopherson <[email protected]>
1 parent 4f1bd6b commit 3e26b82

File tree

1 file changed

+45
-9
lines changed

1 file changed

+45
-9
lines changed

tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

Lines changed: 45 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -150,26 +150,46 @@ static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature even
150150
guest_assert_event_count(idx, event, pmc, pmc_msr);
151151
}
152152

153+
#define X86_PMU_FEATURE_NULL \
154+
({ \
155+
struct kvm_x86_pmu_feature feature = {}; \
156+
\
157+
feature; \
158+
})
159+
160+
static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
161+
{
162+
return !(*(u64 *)&event);
163+
}
164+
153165
static void guest_test_arch_event(uint8_t idx)
154166
{
155167
const struct {
156168
struct kvm_x86_pmu_feature gp_event;
169+
struct kvm_x86_pmu_feature fixed_event;
157170
} intel_event_to_feature[] = {
158-
[INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES },
159-
[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX] = { X86_PMU_FEATURE_INSNS_RETIRED },
160-
[INTEL_ARCH_REFERENCE_CYCLES_INDEX] = { X86_PMU_FEATURE_REFERENCE_CYCLES },
161-
[INTEL_ARCH_LLC_REFERENCES_INDEX] = { X86_PMU_FEATURE_LLC_REFERENCES },
162-
[INTEL_ARCH_LLC_MISSES_INDEX] = { X86_PMU_FEATURE_LLC_MISSES },
163-
[INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED },
164-
[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED },
165-
[INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS },
171+
[INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
172+
[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX] = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
173+
/*
174+
* Note, the fixed counter for reference cycles is NOT the same
175+
* as the general purpose architectural event. The fixed counter
176+
* explicitly counts at the same frequency as the TSC, whereas
177+
* the GP event counts at a fixed, but uarch specific, frequency.
178+
* Bundle them here for simplicity.
179+
*/
180+
[INTEL_ARCH_REFERENCE_CYCLES_INDEX] = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
181+
[INTEL_ARCH_LLC_REFERENCES_INDEX] = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
182+
[INTEL_ARCH_LLC_MISSES_INDEX] = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
183+
[INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
184+
[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
185+
[INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
166186
};
167187

168188
uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
169189
uint32_t pmu_version = guest_get_pmu_version();
170190
/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
171191
bool guest_has_perf_global_ctrl = pmu_version >= 2;
172-
struct kvm_x86_pmu_feature gp_event;
192+
struct kvm_x86_pmu_feature gp_event, fixed_event;
173193
uint32_t base_pmc_msr;
174194
unsigned int i;
175195

@@ -199,6 +219,22 @@ static void guest_test_arch_event(uint8_t idx)
199219
__guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i,
200220
MSR_P6_EVNTSEL0 + i, eventsel);
201221
}
222+
223+
if (!guest_has_perf_global_ctrl)
224+
return;
225+
226+
fixed_event = intel_event_to_feature[idx].fixed_event;
227+
if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event))
228+
return;
229+
230+
i = fixed_event.f.bit;
231+
232+
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
233+
234+
__guest_test_arch_event(idx, fixed_event, i | INTEL_RDPMC_FIXED,
235+
MSR_CORE_PERF_FIXED_CTR0 + i,
236+
MSR_CORE_PERF_GLOBAL_CTRL,
237+
FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
202238
}
203239

204240
static void guest_test_arch_events(void)

0 commit comments

Comments
 (0)