Skip to content

Commit 4df6856

Browse files
committed
KVM: x86: Update retry protection fields when forcing retry on emulation failure
When retrying the faulting instruction after emulation failure, refresh the infinite loop protection fields even if no shadow pages were zapped, i.e. avoid hitting an infinite loop even when retrying the instruction as a last-ditch effort to avoid terminating the guest.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent dabc4ff commit 4df6856

File tree

3 files changed

+17
-7
lines changed

3 files changed

+17
-7
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 9 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2133,7 +2133,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
21332133
void kvm_update_dr7(struct kvm_vcpu *vcpu);
21342134

21352135
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
2136-
bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa);
2136+
bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2137+
bool always_retry);
2138+
2139+
static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
2140+
gpa_t cr2_or_gpa)
2141+
{
2142+
return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
2143+
}
2144+
21372145
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
21382146
ulong roots_to_free);
21392147
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);

arch/x86/kvm/mmu/mmu.c

Lines changed: 7 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -2713,10 +2713,11 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
27132713
return r;
27142714
}
27152715

2716-
bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
2716+
bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2717+
bool always_retry)
27172718
{
27182719
gpa_t gpa = cr2_or_gpa;
2719-
bool r;
2720+
bool r = false;
27202721

27212722
/*
27222723
* Bail early if there aren't any write-protected shadow pages to avoid
@@ -2727,16 +2728,17 @@ bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
27272728
* skipping the unprotect+retry path, which is also an optimization.
27282729
*/
27292730
if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
2730-
return false;
2731+
goto out;
27312732

27322733
if (!vcpu->arch.mmu->root_role.direct) {
27332734
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
27342735
if (gpa == INVALID_GPA)
2735-
return false;
2736+
goto out;
27362737
}
27372738

27382739
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
2739-
if (r) {
2740+
out:
2741+
if (r || always_retry) {
27402742
vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
27412743
vcpu->arch.last_retry_addr = cr2_or_gpa;
27422744
}

arch/x86/kvm/x86.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -8886,7 +8886,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
88868886
* guest to let the CPU re-execute the instruction in the hope that the
88878887
* CPU can cleanly execute the instruction that KVM failed to emulate.
88888888
*/
8889-
kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa);
8889+
__kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
88908890

88918891
/*
88928892
* Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible

0 commit comments

Comments (0)