Skip to content

Commit 309d285

Browse files
tlendacky and sean-jc
authored and committed
KVM: SVM: Fix SNP AP destroy race with VMRUN
An AP destroy request for a target vCPU is typically followed by an RMPADJUST to remove the VMSA attribute from the page currently being used as the VMSA for the target vCPU. This can result in a vCPU that is about to VMRUN to exit with #VMEXIT_INVALID. This usually does not happen as APs are typically sitting in HLT when being destroyed and therefore the vCPU thread is not running at the time. However, if HLT is allowed inside the VM, then the vCPU could be about to VMRUN when the VMSA attribute is removed from the VMSA page, resulting in a #VMEXIT_INVALID when the vCPU actually issues the VMRUN and causing the guest to crash. An RMPADJUST against an in-use (already running) VMSA results in a #NPF for the vCPU issuing the RMPADJUST, so the VMSA attribute cannot be changed until the VMRUN for target vCPU exits. The Qemu command line option '-overcommit cpu-pm=on' is an example of allowing HLT inside the guest. Update the KVM_REQ_UPDATE_PROTECTED_GUEST_STATE event to include the KVM_REQUEST_WAIT flag. The kvm_vcpu_kick() function will not wait for requests to be honored, so create kvm_make_request_and_kick() that will add a new event request and honor the KVM_REQUEST_WAIT flag. This will ensure that the target vCPU sees the AP destroy request before returning to the initiating vCPU should the target vCPU be in guest mode. Fixes: e366f92 ("KVM: SEV: Support SEV-SNP AP Creation NAE event") Signed-off-by: Tom Lendacky <[email protected]> Link: https://lore.kernel.org/r/fe2c885bf35643dd224e91294edb6777d5df23a4.1743097196.git.thomas.lendacky@amd.com [sean: add a comment explaining the use of smp_send_reschedule()] Co-developed-by: Sean Christopherson <[email protected]> Signed-off-by: Sean Christopherson <[email protected]>
1 parent 45eb291 commit 309d285

File tree

4 files changed

+37
-10
lines changed

4 files changed

+37
-10
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,8 @@
125125
KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
126126
#define KVM_REQ_HV_TLB_FLUSH \
127127
KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
128-
#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE KVM_ARCH_REQ(34)
128+
#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE \
129+
KVM_ARCH_REQ_FLAGS(34, KVM_REQUEST_WAIT)
129130

130131
#define CR0_RESERVED_BITS \
131132
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \

arch/x86/kvm/svm/sev.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3988,10 +3988,8 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
39883988
* Unless Creation is deferred until INIT, signal the vCPU to update
39893989
* its state.
39903990
*/
3991-
if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT) {
3992-
kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
3993-
kvm_vcpu_kick(target_vcpu);
3994-
}
3991+
if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT)
3992+
kvm_make_request_and_kick(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
39953993

39963994
return 0;
39973995
}

include/linux/kvm_host.h

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1505,7 +1505,16 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
15051505
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
15061506
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
15071507
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
1508-
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
1508+
1509+
#ifndef CONFIG_S390
1510+
void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
1511+
1512+
static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1513+
{
1514+
__kvm_vcpu_kick(vcpu, false);
1515+
}
1516+
#endif
1517+
15091518
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
15101519
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
15111520

@@ -2253,6 +2262,14 @@ static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
22532262
__kvm_make_request(req, vcpu);
22542263
}
22552264

2265+
#ifndef CONFIG_S390
2266+
static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
2267+
{
2268+
kvm_make_request(req, vcpu);
2269+
__kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
2270+
}
2271+
#endif
2272+
22562273
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
22572274
{
22582275
return READ_ONCE(vcpu->requests);

virt/kvm/kvm_main.c

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3739,7 +3739,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
37393739
/*
37403740
* Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
37413741
*/
3742-
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3742+
void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
37433743
{
37443744
int me, cpu;
37453745

@@ -3768,13 +3768,24 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
37683768
*/
37693769
if (kvm_arch_vcpu_should_kick(vcpu)) {
37703770
cpu = READ_ONCE(vcpu->cpu);
3771-
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3772-
smp_send_reschedule(cpu);
3771+
if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
3772+
/*
3773+
* Use a reschedule IPI to kick the vCPU if the caller
3774+
* doesn't need to wait for a response, as KVM allows
3775+
* kicking vCPUs while IRQs are disabled, but using the
3776+
* SMP function call framework with IRQs disabled can
3777+
* deadlock due to taking cross-CPU locks.
3778+
*/
3779+
if (wait)
3780+
smp_call_function_single(cpu, ack_kick, NULL, wait);
3781+
else
3782+
smp_send_reschedule(cpu);
3783+
}
37733784
}
37743785
out:
37753786
put_cpu();
37763787
}
3777-
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3788+
EXPORT_SYMBOL_GPL(__kvm_vcpu_kick);
37783789
#endif /* !CONFIG_S390 */
37793790

37803791
int kvm_vcpu_yield_to(struct kvm_vcpu *target)

0 commit comments

Comments (0)