Skip to content

Commit 32f55e4

Browse files
committed
KVM: nVMX: Request immediate exit iff pending nested event needs injection
When requesting an immediate exit from L2 in order to inject a pending event, do so only if the pending event actually requires manual injection, i.e. if and only if KVM actually needs to regain control in order to deliver the event.

Avoiding the "immediate exit" isn't simply an optimization, it's necessary to make forward progress, as the "already expired" VMX preemption timer trick that KVM uses to force a VM-Exit has higher priority than events that aren't directly injected.

At present time, this is a glorified nop as all events processed by vmx_has_nested_events() require injection, but that will not hold true in the future, e.g. if there's a pending virtual interrupt in vmcs02.RVI. I.e. if KVM is trying to deliver a virtual interrupt to L2, the expired VMX preemption timer will trigger VM-Exit before the virtual interrupt is delivered, and KVM will effectively hang the vCPU in an endless loop of forced immediate VM-Exits (because the pending virtual interrupt never goes away).

Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent d83c36d commit 32f55e4

File tree

3 files changed

+4
-4
lines changed

3 files changed

+4
-4
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1823,7 +1823,7 @@ struct kvm_x86_nested_ops {
18231823
bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
18241824
u32 error_code);
18251825
int (*check_events)(struct kvm_vcpu *vcpu);
1826-
bool (*has_events)(struct kvm_vcpu *vcpu);
1826+
bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
18271827
void (*triple_fault)(struct kvm_vcpu *vcpu);
18281828
int (*get_state)(struct kvm_vcpu *vcpu,
18291829
struct kvm_nested_state __user *user_kvm_nested_state,

arch/x86/kvm/vmx/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4032,7 +4032,7 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
40324032
to_vmx(vcpu)->nested.preemption_timer_expired;
40334033
}
40344034

4035-
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
4035+
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
40364036
{
40374037
return nested_vmx_preemption_timer_pending(vcpu) ||
40384038
to_vmx(vcpu)->nested.mtf_pending;

arch/x86/kvm/x86.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10516,7 +10516,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
1051610516

1051710517
if (is_guest_mode(vcpu) &&
1051810518
kvm_x86_ops.nested_ops->has_events &&
10519-
kvm_x86_ops.nested_ops->has_events(vcpu))
10519+
kvm_x86_ops.nested_ops->has_events(vcpu, true))
1052010520
*req_immediate_exit = true;
1052110521

1052210522
/*
@@ -13157,7 +13157,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
1315713157

1315813158
if (is_guest_mode(vcpu) &&
1315913159
kvm_x86_ops.nested_ops->has_events &&
13160-
kvm_x86_ops.nested_ops->has_events(vcpu))
13160+
kvm_x86_ops.nested_ops->has_events(vcpu, false))
1316113161
return true;
1316213162

1316313163
if (kvm_xen_has_pending_events(vcpu))

0 commit comments

Comments (0)