
Commit 6b3dcab
KVM: x86/mmu: Subsume kvm_mmu_unprotect_page() into the and_retry() version
Fold kvm_mmu_unprotect_page() into kvm_mmu_unprotect_gfn_and_retry() now
that all other direct usage is gone.

No functional change intended.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 2876624 commit 6b3dcab
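
For quick orientation, the sketch below condenses how the unprotect path reads
once the helper is folded in. It is assembled from the hunks that follow, not
copied verbatim from the tree; the pieces this diff does not touch (the
GVA-to-GPA handling for indirect MMUs and the retry bookkeeping at the end)
are elided in comments.

bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       bool always_retry)
{
	struct kvm *kvm = vcpu->kvm;
	LIST_HEAD(invalid_list);
	struct kvm_mmu_page *sp;
	gpa_t gpa = cr2_or_gpa;
	bool r = false;

	/* Lockless fast path: no indirect shadow pages, nothing to unprotect. */
	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
		goto out;

	/* ... translation of cr2_or_gpa for indirect MMUs elided ... */

	/* What used to be kvm_mmu_unprotect_page(), now inlined. */
	r = false;
	write_lock(&kvm->mmu_lock);
	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) {
		r = true;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);

out:
	/* ... last_retry_eip/always_retry bookkeeping elided ... */
	return r;
}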

File tree: 2 files changed, +13 -21 lines

arch/x86/include/asm/kvm_host.h
Lines changed: 0 additions & 1 deletion

@@ -2132,7 +2132,6 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
 
 void kvm_update_dr7(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 				       bool always_retry);
 

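With kvm_mmu_unprotect_page() gone, the header keeps only the
__kvm_mmu_unprotect_gfn_and_retry() declaration. Callers that don't need
always_retry presumably go through a thin wrapper along these lines (the
wrapper is not part of this diff, so treat the exact form as an assumption):

static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
						   gpa_t cr2_or_gpa)
{
	/* Assumed wrapper: same path as above, without forcing a retry. */
	return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
}
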
arch/x86/kvm/mmu/mmu.c
Lines changed: 13 additions & 20 deletions

@@ -2695,27 +2695,12 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 	write_unlock(&kvm->mmu_lock);
 }
 
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
-{
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	int r;
-
-	r = 0;
-	write_lock(&kvm->mmu_lock);
-	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
-		r = 1;
-		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-	}
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	write_unlock(&kvm->mmu_lock);
-
-	return r;
-}
-
 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 				       bool always_retry)
 {
+	struct kvm *kvm = vcpu->kvm;
+	LIST_HEAD(invalid_list);
+	struct kvm_mmu_page *sp;
 	gpa_t gpa = cr2_or_gpa;
 	bool r = false;
 
@@ -2727,7 +2712,7 @@ bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	 * positive is benign, and a false negative will simply result in KVM
 	 * skipping the unprotect+retry path, which is also an optimization.
 	 */
-	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
 		goto out;
 
 	if (!vcpu->arch.mmu->root_role.direct) {
@@ -2736,7 +2721,15 @@ bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			goto out;
 	}
 
-	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+	r = false;
+	write_lock(&kvm->mmu_lock);
+	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) {
+		r = true;
+		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+	}
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	write_unlock(&kvm->mmu_lock);
+
 out:
 	if (r || always_retry) {
 		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
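
For context on how the return value is consumed, a hypothetical call site is
sketched below: after a failed emulation of a write that may have hit a
shadowed guest page table, the caller unprotects the gfn and retries the
instruction if any shadow page was zapped. Everything here except
kvm_mmu_unprotect_gfn_and_retry() is illustrative, not taken from this commit.

/* Illustrative caller, not from this commit. */
static int example_reexecute_on_failure(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
{
	/*
	 * If a shadow page mapping cr2_or_gpa was zapped, the guest can
	 * simply re-execute the faulting write instead of failing emulation.
	 */
	if (kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
		return 1;	/* re-enter the guest and retry */

	return 0;		/* report the emulation failure */
}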
