Skip to content

Commit 73554b2

Browse files
jsmattsonjr authored and sean-jc committed
KVM: x86/pmu: Synthesize at most one PMI per VM-exit
When the irq_work callback, kvm_pmi_trigger_fn(), is invoked during a VM-exit that also invokes __kvm_perf_overflow() as a result of instruction emulation, kvm_pmu_deliver_pmi() will be called twice before the next VM-entry. Calling kvm_pmu_deliver_pmi() twice is unlikely to be problematic now that KVM sets the LVTPC mask bit when delivering a PMI. But using IRQ work to trigger the PMI is still broken, albeit very theoretically. E.g. if the self-IPI to trigger IRQ work is delayed long enough for the vCPU to be migrated to a different pCPU, then it's possible for kvm_pmi_trigger_fn() to race with the kvm_pmu_deliver_pmi() from KVM_REQ_PMI and still generate two PMIs. KVM could set the mask bit using an atomic operation, but that'd just be piling on unnecessary code to workaround what is effectively a hack. The *only* reason KVM uses IRQ work is to ensure the PMI is treated as a wake event, e.g. if the vCPU just executed HLT. Remove the irq_work callback for synthesizing a PMI, and all of the logic for invoking it. Instead, to prevent a vcpu from leaving C0 with a PMI pending, add a check for KVM_REQ_PMI to kvm_vcpu_has_events(). Fixes: 9cd803d ("KVM: x86: Update vPMCs when retiring instructions") Signed-off-by: Jim Mattson <[email protected]> Tested-by: Mingwei Zhang <[email protected]> Tested-by: Dapeng Mi <[email protected]> Signed-off-by: Mingwei Zhang <[email protected]> Link: https://lore.kernel.org/r/[email protected] [sean: massage changelog] Signed-off-by: Sean Christopherson <[email protected]>
1 parent a16eb25 commit 73554b2

File tree

3 files changed

+4
-27
lines changed

3 files changed

+4
-27
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -528,7 +528,6 @@ struct kvm_pmu {
528528
u64 raw_event_mask;
529529
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
530530
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
531-
struct irq_work irq_work;
532531

533532
/*
534533
* Overlay the bitmap with a 64-bit atomic so that all bits can be

arch/x86/kvm/pmu.c

Lines changed: 1 addition & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -93,14 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
9393
#undef __KVM_X86_PMU_OP
9494
}
9595

96-
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
97-
{
98-
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
99-
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
100-
101-
kvm_pmu_deliver_pmi(vcpu);
102-
}
103-
10496
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
10597
{
10698
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -124,20 +116,7 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
124116
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
125117
}
126118

127-
if (!pmc->intr || skip_pmi)
128-
return;
129-
130-
/*
131-
* Inject PMI. If vcpu was in a guest mode during NMI PMI
132-
* can be ejected on a guest mode re-entry. Otherwise we can't
133-
* be sure that vcpu wasn't executing hlt instruction at the
134-
* time of vmexit and is not going to re-enter guest mode until
135-
* woken up. So we should wake it, but this is impossible from
136-
* NMI context. Do it from irq work instead.
137-
*/
138-
if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
139-
irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
140-
else
119+
if (pmc->intr && !skip_pmi)
141120
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
142121
}
143122

@@ -675,9 +654,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
675654

676655
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
677656
{
678-
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
679-
680-
irq_work_sync(&pmu->irq_work);
681657
static_call(kvm_x86_pmu_reset)(vcpu);
682658
}
683659

@@ -687,7 +663,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
687663

688664
memset(pmu, 0, sizeof(*pmu));
689665
static_call(kvm_x86_pmu_init)(vcpu);
690-
init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
691666
pmu->event_count = 0;
692667
pmu->need_cleanup = false;
693668
kvm_pmu_refresh(vcpu);

arch/x86/kvm/x86.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12843,6 +12843,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
1284312843
return true;
1284412844
#endif
1284512845

12846+
if (kvm_test_request(KVM_REQ_PMI, vcpu))
12847+
return true;
12848+
1284612849
if (kvm_arch_interrupt_allowed(vcpu) &&
1284712850
(kvm_cpu_has_interrupt(vcpu) ||
1284812851
kvm_guest_apic_has_interrupt(vcpu)))

0 commit comments

Comments
 (0)