Skip to content

Commit 14d02b7

Browse files
sean-jc authored and bonzini committed
KVM: LoongArch: Use kvm_faultin_pfn() to map pfns into the guest
Convert LoongArch to kvm_faultin_pfn()+kvm_release_faultin_page(), which are new APIs to consolidate arch code and provide consistent behavior across all KVM architectures. Signed-off-by: Sean Christopherson <[email protected]> Tested-by: Dmitry Osipenko <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> Message-ID: <[email protected]>
1 parent 35b80f7 commit 14d02b7

File tree

1 file changed

+6
-8
lines changed

1 file changed

+6
-8
lines changed

arch/loongarch/kvm/mmu.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -780,6 +780,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
780780
struct kvm *kvm = vcpu->kvm;
781781
struct kvm_memory_slot *memslot;
782782
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
783+
struct page *page;
783784

784785
/* Try the fast path to handle old / clean pages */
785786
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -807,7 +808,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
807808
mmu_seq = kvm->mmu_invalidate_seq;
808809
/*
809810
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
810-
* gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
811+
* kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
811812
* risk the page we get a reference to getting unmapped before we have a
812813
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
813814
*
@@ -819,7 +820,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
819820
smp_rmb();
820821

821822
/* Slow path - ask KVM core whether we can access this GPA */
822-
pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
823+
pfn = kvm_faultin_pfn(vcpu, gfn, write, &writeable, &page);
823824
if (is_error_noslot_pfn(pfn)) {
824825
err = -EFAULT;
825826
goto out;
@@ -831,10 +832,10 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
831832
/*
832833
* This can happen when mappings are changed asynchronously, but
833834
* also synchronously if a COW is triggered by
834-
* gfn_to_pfn_prot().
835+
* kvm_faultin_pfn().
835836
*/
836837
spin_unlock(&kvm->mmu_lock);
837-
kvm_release_pfn_clean(pfn);
838+
kvm_release_page_unused(page);
838839
if (retry_no > 100) {
839840
retry_no = 0;
840841
schedule();
@@ -900,10 +901,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
900901
++kvm->stat.pages;
901902
kvm_set_pte(ptep, new_pte);
902903

903-
if (writeable)
904-
kvm_set_pfn_dirty(pfn);
905-
kvm_release_pfn_clean(pfn);
906-
904+
kvm_release_faultin_page(kvm, page, false, writeable);
907905
spin_unlock(&kvm->mmu_lock);
908906

909907
if (prot_bits & _PAGE_DIRTY)

0 commit comments

Comments (0)