Commit 7e8f1aa

sean-jc authored and bonzini committed
KVM: MIPS: Use kvm_faultin_pfn() to map pfns into the guest
Convert MIPS to kvm_faultin_pfn()+kvm_release_faultin_page(), which are new APIs to consolidate arch code and provide consistent behavior across all KVM architectures.

Signed-off-by: Sean Christopherson <[email protected]>
Tested-by: Dmitry Osipenko <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Message-ID: <[email protected]>
1 parent 13d66fd commit 7e8f1aa

File tree

1 file changed: +6 -8 lines changed


arch/mips/kvm/mmu.c

Lines changed: 6 additions & 8 deletions
@@ -557,6 +557,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
         bool writeable;
         unsigned long prot_bits;
         unsigned long mmu_seq;
+        struct page *page;
 
         /* Try the fast path to handle old / clean pages */
         srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -578,7 +579,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
         mmu_seq = kvm->mmu_invalidate_seq;
         /*
          * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
-         * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+         * in kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
          * risk the page we get a reference to getting unmapped before we have a
          * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
          *
@@ -590,7 +591,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
         smp_rmb();
 
         /* Slow path - ask KVM core whether we can access this GPA */
-        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+        pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
         if (is_error_noslot_pfn(pfn)) {
                 err = -EFAULT;
                 goto out;
@@ -602,10 +603,10 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                 /*
                  * This can happen when mappings are changed asynchronously, but
                  * also synchronously if a COW is triggered by
-                 * gfn_to_pfn_prot().
+                 * kvm_faultin_pfn().
                  */
                 spin_unlock(&kvm->mmu_lock);
-                kvm_release_pfn_clean(pfn);
+                kvm_release_page_unused(page);
                 goto retry;
         }
 
@@ -632,10 +633,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
         if (out_buddy)
                 *out_buddy = *ptep_buddy(ptep);
 
-        if (writeable)
-                kvm_set_pfn_dirty(pfn);
-        kvm_release_pfn_clean(pfn);
-
+        kvm_release_faultin_page(kvm, page, false, writeable);
         spin_unlock(&kvm->mmu_lock);
 out:
         srcu_read_unlock(&kvm->srcu, srcu_idx);
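
For readers scanning the hunks above, the resulting slow-path flow in kvm_mips_map_page() looks roughly like the sketch below. It is assembled from the diff context rather than copied verbatim: the fast-path and prot_bits handling are elided, the mmu_invalidate_retry() check is inferred from the comments in the hunks, and the remaining locals and labels (kvm, vcpu, gfn, gpa, write_fault, mmu_seq, err, retry, out) are assumed to come from the unchanged parts of the function.

        /*
         * Condensed sketch, not the full function; see the note above for
         * which identifiers are assumed from the surrounding code.
         */
        kvm_pfn_t pfn;
        struct page *page;
        bool writeable;

        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
        if (is_error_noslot_pfn(pfn)) {
                err = -EFAULT;
                goto out;
        }

        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                /*
                 * The page was never mapped into the guest, so release it as
                 * unused and retry (the retry label, which re-reads mmu_seq,
                 * is earlier in the function and not shown here).
                 */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_page_unused(page);
                goto retry;
        }

        /* ... install the PTE for gpa under mmu_lock ... */

        /*
         * Release the reference taken by kvm_faultin_pfn(); per the diff,
         * "false" indicates the page was actually used and "writeable" tells
         * KVM whether to mark it dirty on release.
         */
        kvm_release_faultin_page(kvm, page, false, writeable);
        spin_unlock(&kvm->mmu_lock);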
