
Commit a997e0f

Like Xu authored and gregkh committed
KVM: x86/pmu: Fix and isolate TSX-specific performance event logic
[ Upstream commit e644896 ]

HSW_IN_TX* bits are used in generic code which are not supported on AMD.
Worse, these bits overlap with AMD EventSelect[11:8] and hence using
HSW_IN_TX* bits unconditionally in generic code is resulting in
unintentional pmu behavior on AMD. For example, if EventSelect[11:8] is
0x2, pmc_reprogram_counter() wrongly assumes that
HSW_IN_TX_CHECKPOINTED is set and thus forces sampling period to be 0.

Also per the SDM, both bits 32 and 33 "may only be set if the processor
supports HLE or RTM" and for "IN_TXCP (bit 33): this bit may only be set
for IA32_PERFEVTSEL2."

Opportunistically eliminate code redundancy, because if the HSW_IN_TX*
bit is set in pmc->eventsel, it is already set in attr.config.

Reported-by: Ravi Bangoria <[email protected]>
Reported-by: Jim Mattson <[email protected]>
Fixes: 103af0a ("perf, kvm: Support the in_tx/in_tx_cp modifiers in KVM arch perfmon emulation v5")
Co-developed-by: Ravi Bangoria <[email protected]>
Signed-off-by: Ravi Bangoria <[email protected]>
Signed-off-by: Like Xu <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
1 parent e7bab98 commit a997e0f
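
To make the overlap concrete, here is a minimal userspace sketch (not kernel code; the event number 0x276 is made up for illustration). AMD's 12-bit EventSelect keeps bits [7:0] in PerfEvtSeln bits [7:0] and bits [11:8] in bits [35:32], so EventSelect[11:8] = 0x2 sets MSR bit 33, the same bit Intel defines as HSW_IN_TX_CHECKPOINTED:

#include <stdint.h>
#include <stdio.h>

#define HSW_IN_TX		(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

int main(void)
{
	/* Hypothetical AMD raw event: EventSelect[7:0] = 0x76,
	 * EventSelect[11:8] = 0x2.  On AMD the high nibble sits at
	 * PerfEvtSeln bits [35:32]. */
	uint64_t eventsel = (0x2ULL << 32) | 0x76;

	/* Pre-fix generic code tested the Intel-only bit blindly; for this
	 * event, bit 33 is set, so the sample period was forced to 0. */
	if (eventsel & HSW_IN_TX_CHECKPOINTED)
		printf("EventSelect[11:8]=0x2 aliases IN_TXCP (bit 33)\n");
	return 0;
}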

File tree

2 files changed (+15, -13 lines)

arch/x86/kvm/pmu.c

Lines changed: 5 additions & 10 deletions
@@ -96,8 +96,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 				  u64 config, bool exclude_user,
-				  bool exclude_kernel, bool intr,
-				  bool in_tx, bool in_tx_cp)
+				  bool exclude_kernel, bool intr)
 {
 	struct perf_event *event;
 	struct perf_event_attr attr = {
@@ -113,16 +112,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 
 	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
-	if (in_tx)
-		attr.config |= HSW_IN_TX;
-	if (in_tx_cp) {
+	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
+	    guest_cpuid_is_intel(pmc->vcpu)) {
 		/*
 		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
 		 * period. Just clear the sample period so at least
 		 * allocating the counter doesn't fail.
 		 */
 		attr.sample_period = 0;
-		attr.config |= HSW_IN_TX_CHECKPOINTED;
 	}
 
 	event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -229,9 +226,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	pmc_reprogram_counter(pmc, type, config,
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
-			      (eventsel & HSW_IN_TX),
-			      (eventsel & HSW_IN_TX_CHECKPOINTED));
+			      eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
 EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
@@ -267,7 +262,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
 			      !(en_field & 0x2), /* exclude user */
 			      !(en_field & 0x1), /* exclude kernel */
-			      pmi, false, false);
+			      pmi);
 }
 EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
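Why testing attr.config works in the hunks above: the config value passed into pmc_reprogram_counter() is derived from pmc->eventsel on the raw-event path, so any HSW_IN_TX* bit set in the guest's event selector is already present in attr.config, which is exactly the redundancy the commit message calls out. A toy, compile-and-run sketch of that invariant (RAW_EVENT_MASK here is illustrative, not the kernel's actual mask):

#include <assert.h>
#include <stdint.h>

#define HSW_IN_TX		(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)
/* Illustrative raw-event mask that preserves the TSX modifier bits;
 * the kernel's real mask differs by vendor. */
#define RAW_EVENT_MASK	(0xFFULL | (0xFULL << 8) | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED)

int main(void)
{
	uint64_t eventsel = 0xC8ULL | HSW_IN_TX_CHECKPOINTED;	/* made-up event */
	uint64_t config = eventsel & RAW_EVENT_MASK;	/* what lands in attr.config */

	/* Set in eventsel implies set in config, so pmc_reprogram_counter()
	 * can test attr.config instead of taking extra bool parameters. */
	assert(config & HSW_IN_TX_CHECKPOINTED);
	return 0;
}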

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -396,6 +396,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	struct kvm_pmc *pmc;
 	u32 msr = msr_info->index;
 	u64 data = msr_info->data;
+	u64 reserved_bits;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -451,7 +452,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 		if (data == pmc->eventsel)
 			return 0;
-		if (!(data & pmu->reserved_bits)) {
+		reserved_bits = pmu->reserved_bits;
+		if ((pmc->idx == 2) &&
+		    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+			reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+		if (!(data & reserved_bits)) {
 			reprogram_gp_counter(pmc, data);
 			return 0;
 		}
@@ -525,8 +530,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
 	if (entry &&
 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+		pmu->reserved_bits ^= HSW_IN_TX;
+		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+	}
 
 	bitmap_set(pmu->all_valid_pmc_idx,
 		   0, pmu->nr_arch_gp_counters);
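
The intel_pmu_set_msr() hunk encodes the SDM rule quoted in the commit message: IN_TXCP may only be set in IA32_PERFEVTSEL2, so HSW_IN_TX_CHECKPOINTED is carved out of the reserved mask only when pmc->idx == 2 and only when the refresh path advertised TSX via raw_event_mask. A standalone sketch of that per-counter check (struct reduced to the two masks involved; names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

struct toy_pmu {
	uint64_t reserved_bits;		/* bits a guest may never set */
	uint64_t raw_event_mask;	/* bits forwarded to perf as raw config */
};

/* Mirrors the hunk above: IN_TXCP is accepted only on counter 2, and only
 * when the refresh path put it into raw_event_mask (i.e. TSX is exposed). */
static bool eventsel_allowed(const struct toy_pmu *pmu, int pmc_idx,
			     uint64_t data)
{
	uint64_t reserved_bits = pmu->reserved_bits;

	if (pmc_idx == 2 && (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
		reserved_bits ^= HSW_IN_TX_CHECKPOINTED;

	return !(data & reserved_bits);
}

int main(void)
{
	struct toy_pmu pmu = {
		.reserved_bits  = HSW_IN_TX_CHECKPOINTED,
		.raw_event_mask = HSW_IN_TX_CHECKPOINTED,
	};

	/* Accepted on PMC2, rejected on any other counter. */
	return (eventsel_allowed(&pmu, 2, HSW_IN_TX_CHECKPOINTED) &&
		!eventsel_allowed(&pmu, 0, HSW_IN_TX_CHECKPOINTED)) ? 0 : 1;
}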
