commit be6b067
parent f933b88
KVM: x86/pmu: Add common define to capture fixed counters offset
Add a common define to "officially" solidify KVM's split of counters,
i.e. to commit to using bits 31:0 to track general purpose counters and
bits 63:32 to track fixed counters (which only Intel supports). KVM
already bleeds this behavior all over common PMU code, and adding a
KVM-defined macro allows clarifying that the value is a _base_, as
opposed to the _flag_ that is used to access fixed PMCs via RDPMC
(which perf confusingly calls INTEL_PMC_FIXED_RDPMC_BASE).

No functional change intended.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
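To make the base-vs-flag distinction concrete, here is a minimal
standalone sketch (not part of the commit; the FIXED_RDPMC_FLAG name
below is made up for illustration, while the bit-30 RDPMC selector
itself is what perf calls INTEL_PMC_FIXED_RDPMC_BASE):

    /*
     * Illustrative only: KVM's PMC index space puts GP counters at
     * indices 0..31 and fixed counter N at 32 + N, i.e.
     * KVM_FIXED_PMC_BASE_IDX is a *base* used with +/- arithmetic.
     * The RDPMC selector is a *flag* ORed into ECX, a different thing.
     */
    #include <stdio.h>

    #define KVM_FIXED_PMC_BASE_IDX  32          /* base index, not a flag */
    #define FIXED_RDPMC_FLAG        (1u << 30)  /* hypothetical name for the RDPMC selector bit */

    int main(void)
    {
            /* Fixed counter 1 as a KVM-internal pmc_idx ... */
            int pmc_idx = KVM_FIXED_PMC_BASE_IDX + 1;        /* == 33 */
            /* ... vs. how a guest selects it via RDPMC's ECX. */
            unsigned int rdpmc_ecx = FIXED_RDPMC_FLAG | 1;   /* == 0x40000001 */

            printf("pmc_idx=%d rdpmc_ecx=%#x\n", pmc_idx, rdpmc_ecx);
            return 0;
    }

The base participates in ordinary addition and subtraction; the RDPMC
flag never does, which is why reusing one name for both was confusing.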

3 files changed, 13 insertions(+), 11 deletions(-)
arch/x86/kvm/pmu.c (4 additions, 4 deletions)

@@ -67,7 +67,7 @@ static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
  * all perf counters (both gp and fixed). The mapping relationship
  * between pmc and perf counters is as the following:
  * * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
- *          [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
+ *          [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
  * * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
  *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
  */
@@ -411,7 +411,7 @@ static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
 static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
                                    int idx)
 {
-        int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
+        int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX;
 
         if (filter->action == KVM_PMU_EVENT_DENY &&
             test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
@@ -465,7 +465,7 @@ static void reprogram_counter(struct kvm_pmc *pmc)
 
         if (pmc_is_fixed(pmc)) {
                 fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
-                                                  pmc->idx - INTEL_PMC_IDX_FIXED);
+                                                  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
                 if (fixed_ctr_ctrl & 0x1)
                         eventsel |= ARCH_PERFMON_EVENTSEL_OS;
                 if (fixed_ctr_ctrl & 0x2)
@@ -841,7 +841,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
                 select_user = config & ARCH_PERFMON_EVENTSEL_USR;
         } else {
                 config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
-                                          pmc->idx - INTEL_PMC_IDX_FIXED);
+                                          pmc->idx - KVM_FIXED_PMC_BASE_IDX);
                 select_os = config & 0x1;
                 select_user = config & 0x2;
         }
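
The & 0x1 / & 0x2 checks in the hunks above decode the per-counter
nibble of IA32_FIXED_CTR_CTRL (bit 0 enables counting at CPL 0, bit 1
at CPL 3). A minimal sketch of that decoding, with fixed_ctrl_field()
reimplemented here as a simplified stand-in for the kernel macro:

    #include <stdio.h>
    #include <stdint.h>

    #define KVM_FIXED_PMC_BASE_IDX  32

    /* Extract the 4-bit control field for one fixed counter. */
    static uint64_t fixed_ctrl_field(uint64_t ctrl, int idx)
    {
            return (ctrl >> (idx * 4)) & 0xf;
    }

    int main(void)
    {
            uint64_t fixed_ctr_ctrl = 0x0b0;  /* counter 1 nibble: OS|USR|PMI */
            int pmc_idx = KVM_FIXED_PMC_BASE_IDX + 1;  /* fixed counter 1 */
            uint64_t ctrl = fixed_ctrl_field(fixed_ctr_ctrl,
                                             pmc_idx - KVM_FIXED_PMC_BASE_IDX);

            printf("OS=%d USR=%d\n", !!(ctrl & 0x1), !!(ctrl & 0x2));
            return 0;
    }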

arch/x86/kvm/pmu.h (3 additions, 1 deletion)

@@ -18,6 +18,8 @@
 #define VMWARE_BACKDOOR_PMC_REAL_TIME           0x10001
 #define VMWARE_BACKDOOR_PMC_APPARENT_TIME       0x10002
 
+#define KVM_FIXED_PMC_BASE_IDX                  INTEL_PMC_IDX_FIXED
+
 struct kvm_pmu_ops {
         struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
         struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
@@ -130,7 +132,7 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 
         if (pmc_is_fixed(pmc))
                 return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
-                                        pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
+                                        pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;
 
         return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
 }

arch/x86/kvm/vmx/pmu_intel.c (6 additions, 6 deletions)

@@ -50,18 +50,18 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 
                 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
-                __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
+                __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
                 kvm_pmu_request_counter_reprogram(pmc);
         }
 }
 
 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-        if (pmc_idx < INTEL_PMC_IDX_FIXED) {
+        if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) {
                 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                   MSR_P6_EVNTSEL0);
         } else {
-                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+                u32 idx = pmc_idx - KVM_FIXED_PMC_BASE_IDX;
 
                 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
         }
@@ -516,7 +516,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
         for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
                 pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
         counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
-                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
+                (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
         pmu->global_ctrl_mask = counter_mask;
 
         /*
@@ -560,7 +560,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
                 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                         pmu->fixed_ctr_ctrl_mask &=
-                                ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
+                                ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
                 }
                 pmu->pebs_data_cfg_mask = ~0xff00000full;
         } else {
@@ -586,7 +586,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
         for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
                 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                 pmu->fixed_counters[i].vcpu = vcpu;
-                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+                pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
                 pmu->fixed_counters[i].current_config = 0;
                 pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
         }