
Commit 37c4dbf

KVM: x86: check PIR even for vCPUs with disabled APICv
The IRTE for an assigned device can trigger a POSTED_INTR_VECTOR even
if APICv is disabled on the vCPU that receives it.  In that case, the
interrupt will just cause a vmexit and leave the ON bit set together
with the PIR bit corresponding to the interrupt.

Right now, the interrupt would not be delivered until APICv is
re-enabled.  However, fixing this is just a matter of always doing the
PIR->IRR synchronization, even if the vCPU has temporarily disabled
APICv.

This is not a problem for performance, or if anything it is an
improvement.  First, in the common case where vcpu->arch.apicv_active
is true, one fewer check has to be performed.  Second, static_call_cond
will elide the function call if APICv is not present or disabled.
Finally, in the case for AMD hardware we can remove the sync_pir_to_irr
callback: it is only needed for apic_has_interrupt_for_ppr, and that
function already has a fallback for !APICv.

Cc: [email protected]
Co-developed-by: Sean Christopherson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Reviewed-by: Maxim Levitsky <[email protected]>
Reviewed-by: David Matlack <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 7e1901f commit 37c4dbf
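
Editor's note: as background for the message above, the following is a rough,
user-space sketch of the state it describes. A posted-interrupt notification
sets a bit in the PIR plus the ON bit, and the PIR->IRR synchronization is what
later drains those bits into the virtual IRR. The names (demo_pi_desc,
demo_post_interrupt, demo_sync_pir_to_irr) are invented for illustration; the
real descriptor and helpers live under arch/x86/kvm/vmx/ and use atomic bitops.

/*
 * Hypothetical, simplified model of the posted-interrupt descriptor.
 * Not the kernel's layout: the real descriptor is cacheline-aligned and
 * updated atomically by both hardware and software.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_pi_desc {
        uint64_t pir[4];   /* one pending bit per vector, 256 vectors */
        bool on;           /* "outstanding notification" (ON) bit */
};

/* A POSTED_INTR_VECTOR notification records the vector and sets ON. */
static void demo_post_interrupt(struct demo_pi_desc *pi, uint8_t vec)
{
        pi->pir[vec / 64] |= 1ULL << (vec % 64);
        pi->on = true;
}

/*
 * PIR->IRR synchronization: if ON is set, move every pending PIR bit
 * into the virtual IRR.  If this step is skipped while APICv is
 * disabled, the bits simply sit here and the interrupt is never
 * delivered; that is the situation the patch fixes.
 */
static void demo_sync_pir_to_irr(struct demo_pi_desc *pi, uint64_t irr[4])
{
        if (!pi->on)
                return;
        pi->on = false;
        for (int i = 0; i < 4; i++) {
                irr[i] |= pi->pir[i];
                pi->pir[i] = 0;
        }
}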

3 files changed: 10 additions and 11 deletions

arch/x86/kvm/lapic.c

Lines changed: 1 addition & 1 deletion
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
         int highest_irr;
-        if (apic->vcpu->arch.apicv_active)
+        if (kvm_x86_ops.sync_pir_to_irr)
                 highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
         else
                 highest_irr = apic_find_highest_irr(apic);

arch/x86/kvm/svm/svm.c

Lines changed: 0 additions & 1 deletion
@@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
         .load_eoi_exitmap = svm_load_eoi_exitmap,
         .hwapic_irr_update = svm_hwapic_irr_update,
         .hwapic_isr_update = svm_hwapic_isr_update,
-        .sync_pir_to_irr = kvm_lapic_find_highest_irr,
         .apicv_post_state_restore = avic_post_state_restore,
 
         .set_tss_addr = svm_set_tss_addr,
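
Editor's note: with this entry removed, kvm_x86_ops.sync_pir_to_irr is simply
NULL on AMD hosts. The standalone sketch below (demo_* names are invented, not
kernel symbols) shows why that is safe under the reasoning in the commit
message: the lapic.c hunk gates on the callback's presence rather than on
apicv_active, so a NULL op falls back to scanning the virtual IRR, which is
effectively what the removed kvm_lapic_find_highest_irr callback did anyway.

/* Standalone sketch of the fallback pattern; names are illustrative only. */
struct demo_ops {
        int (*sync_pir_to_irr)(void *vcpu);   /* NULL on SVM after this patch */
};

static int demo_apic_find_highest_irr(void *vcpu)
{
        return -1;   /* stand-in for scanning the virtual IRR */
}

static int demo_highest_irr(struct demo_ops *ops, void *vcpu)
{
        if (ops->sync_pir_to_irr)               /* mirrors the lapic.c hunk */
                return ops->sync_pir_to_irr(vcpu);
        return demo_apic_find_highest_irr(vcpu);   /* !APICv fallback */
}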

arch/x86/kvm/x86.c

Lines changed: 9 additions & 9 deletions
@@ -4472,8 +4472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                     struct kvm_lapic_state *s)
 {
-        if (vcpu->arch.apicv_active)
-                static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+        static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
         return kvm_apic_get_state(vcpu, s);
 }
@@ -9571,8 +9570,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
         if (irqchip_split(vcpu->kvm))
                 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
         else {
-                if (vcpu->arch.apicv_active)
-                        static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+                static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
                 if (ioapic_in_kernel(vcpu->kvm))
                         kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
         }
@@ -9842,10 +9840,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
                 /*
                  * This handles the case where a posted interrupt was
-                 * notified with kvm_vcpu_kick.
+                 * notified with kvm_vcpu_kick. Assigned devices can
+                 * use the POSTED_INTR_VECTOR even if APICv is disabled,
+                 * so do it even if APICv is disabled on this vCPU.
                  */
-                if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
-                        static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+                if (kvm_lapic_enabled(vcpu))
+                        static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
                 if (kvm_vcpu_exit_request(vcpu)) {
                         vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9889,8 +9889,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                         break;
 
-                if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
-                        static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+                if (kvm_lapic_enabled(vcpu))
+                        static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
                 if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                         exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
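
Editor's note: the x86.c call sites above no longer check
vcpu->arch.apicv_active; per the commit message, static_call_cond elides the
call when no sync_pir_to_irr op was registered. As a rough user-space analogue
(the demo_* pointer stands in for the static call, and the explicit NULL test
models what static_call_cond patches away at runtime), each call site reduces
to something like:

/* Illustrative only: a plain function pointer instead of a static call. */
static int (*demo_sync_pir_to_irr)(void *vcpu);   /* NULL when APICv is absent */

static void demo_enter_guest_step(void *vcpu, int lapic_enabled)
{
        /*
         * Drain posted interrupts before the final exit-request check,
         * even if APICv is currently disabled on this vCPU.  The NULL
         * check here models static_call_cond() turning the call into a
         * no-op when the op is not implemented.
         */
        if (lapic_enabled && demo_sync_pir_to_irr)
                demo_sync_pir_to_irr(vcpu);
}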
