Commit 01dd4d3

KVM: x86/mmu: Apply retry protection to "fast nTDP unprotect" path
Move the anti-infinite-loop protection provided by last_retry_{eip,addr}
into kvm_mmu_write_protect_fault() so that it guards unprotect+retry
that never hits the emulator, as well as reexecute_instruction(), which
is the last-ditch "might as well try it" logic that kicks in when
emulation fails on an instruction that faulted on a write-protected gfn.

Add a new helper, kvm_mmu_unprotect_gfn_and_retry(), to set the retry
fields and deduplicate other code (with more to come).

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
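For orientation, below is a standalone, compilable sketch of the unprotect+retry
guard that this patch centralizes. It is not kernel code: the struct, the helper
names outside the patch, the simulated unprotect, and the example addresses are
illustrative assumptions; only the guard logic mirrors the diff that follows.

/* Standalone sketch of the retry guard; builds with any C compiler. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the vCPU fields the patch uses (illustrative). */
struct vcpu_state {
	uint64_t last_retry_eip;
	uint64_t last_retry_addr;
};

/* Simulated kvm_mmu_unprotect_page(); always succeeds in this sketch. */
static bool unprotect_gfn(uint64_t gfn)
{
	(void)gfn;
	return true;
}

/* Mirrors kvm_mmu_unprotect_gfn_and_retry(): record RIP+addr on success. */
static bool unprotect_gfn_and_retry(struct vcpu_state *v, uint64_t rip,
				    uint64_t addr)
{
	if (!unprotect_gfn(addr >> 12))
		return false;
	v->last_retry_eip = rip;
	v->last_retry_addr = addr;
	return true;
}

/* Mirrors the guard added to kvm_mmu_write_protect_fault(). */
static const char *write_protect_fault(struct vcpu_state *v, uint64_t rip,
				       uint64_t addr)
{
	/* Same RIP+address re-faulted after an unprotect => emulate. */
	if (v->last_retry_eip == rip && v->last_retry_addr == addr)
		return "RET_PF_EMULATE";

	/* Any other fault resets the guard; retrying later is safe. */
	v->last_retry_eip = 0;
	v->last_retry_addr = 0;

	if (unprotect_gfn_and_retry(v, rip, addr))
		return "RET_PF_RETRY";
	return "RET_PF_EMULATE";
}

int main(void)
{
	struct vcpu_state v = { 0, 0 };

	/* First fault: unprotect the gfn and retry the instruction. */
	printf("%s\n", write_protect_fault(&v, 0x1000, 0x5000)); /* RETRY */
	/* Identical re-fault: the guard breaks the would-be infinite loop. */
	printf("%s\n", write_protect_fault(&v, 0x1000, 0x5000)); /* EMULATE */
	return 0;
}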

3 files changed, 40 insertions(+), 27 deletions(-)

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions

@@ -2133,6 +2133,7 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
 void kvm_update_dr7(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
+bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa);
 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 			ulong roots_to_free);
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);

arch/x86/kvm/mmu/mmu.c

Lines changed: 38 additions & 1 deletion

@@ -2713,6 +2713,22 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	return r;
 }
 
+bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
+{
+	gpa_t gpa = cr2_or_gpa;
+	bool r;
+
+	if (!vcpu->arch.mmu->root_role.direct)
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
+
+	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+	if (r) {
+		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
+		vcpu->arch.last_retry_addr = cr2_or_gpa;
+	}
+	return r;
+}
+
 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	gpa_t gpa;
@@ -5956,6 +5972,27 @@ static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 {
 	bool direct = vcpu->arch.mmu->root_role.direct;
 
+	/*
+	 * Do not try to unprotect and retry if the vCPU re-faulted on the same
+	 * RIP with the same address that was previously unprotected, as doing
+	 * so will likely put the vCPU into an infinite loop. E.g. if the vCPU uses
+	 * a non-page-table modifying instruction on the PDE that points to the
+	 * instruction, then unprotecting the gfn will unmap the instruction's
+	 * code, i.e. make it impossible for the instruction to ever complete.
+	 */
+	if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
+	    vcpu->arch.last_retry_addr == cr2_or_gpa)
+		return RET_PF_EMULATE;
+
+	/*
+	 * Reset the unprotect+retry values that guard against infinite loops.
+	 * The values will be refreshed if KVM explicitly unprotects a gfn and
+	 * retries; in all other cases it's safe to retry in the future even if
+	 * the next page fault happens on the same RIP+address.
+	 */
+	vcpu->arch.last_retry_eip = 0;
+	vcpu->arch.last_retry_addr = 0;
+
 	/*
 	 * Before emulating the instruction, check to see if the access was due
 	 * to a read-only violation while the CPU was walking non-nested NPT
@@ -5986,7 +6023,7 @@ static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	 * format) with L2's page tables (EPT format).
 	 */
 	if (direct && is_write_to_guest_page_table(error_code) &&
-	    kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
+	    kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
 		return RET_PF_RETRY;
 
 	/*

arch/x86/kvm/x86.c

Lines changed: 1 addition & 26 deletions

@@ -8932,27 +8932,13 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 			      gpa_t cr2_or_gpa, int emulation_type)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	unsigned long last_retry_eip, last_retry_addr;
-	gpa_t gpa = cr2_or_gpa;
-
-	last_retry_eip = vcpu->arch.last_retry_eip;
-	last_retry_addr = vcpu->arch.last_retry_addr;
 
 	/*
 	 * If the emulation is caused by #PF and it is non-page_table
 	 * writing instruction, it means the VM-EXIT is caused by shadow
 	 * page protected, we can zap the shadow page and retry this
 	 * instruction directly.
-	 *
-	 * Note: if the guest uses a non-page-table modifying instruction
-	 * on the PDE that points to the instruction, then we will unmap
-	 * the instruction and go to an infinite loop. So, we cache the
-	 * last retried eip and the last fault address, if we meet the eip
-	 * and the address again, we can break out of the potential infinite
-	 * loop.
 	 */
-	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
-
 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
 		return false;
 
@@ -8963,18 +8949,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (x86_page_table_writing_insn(ctxt))
 		return false;
 
-	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
-		return false;
-
-	if (!vcpu->arch.mmu->root_role.direct)
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
-	if (!kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
-		return false;
-
-	vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
-	vcpu->arch.last_retry_addr = cr2_or_gpa;
-	return true;
+	return kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa);
 }
 
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
