Skip to content

Commit 040537c

Browse files
sean-jc
authored and bonzini committed
KVM: PPC: Explicitly require struct page memory for Ultravisor sharing
Explicitly require "struct page" memory when sharing memory between guest and host via an Ultravisor. Given the number of pfn_to_page() calls in the code, it's safe to assume that KVM already requires that the pfn returned by gfn_to_pfn() is backed by struct page, i.e. this is likely a bug fix, not a reduction in KVM capabilities. Switching to gfn_to_page() will eventually allow removing gfn_to_pfn() and kvm_pfn_to_refcounted_page(). Signed-off-by: Sean Christopherson <[email protected]> Tested-by: Dmitry Osipenko <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> Message-ID: <[email protected]>
1 parent 570d666 commit 040537c

File tree

1 file changed

+12
-13
lines changed

1 file changed

+12
-13
lines changed

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 12 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -879,9 +879,8 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
879879
{
880880

881881
int ret = H_PARAMETER;
882-
struct page *uvmem_page;
882+
struct page *page, *uvmem_page;
883883
struct kvmppc_uvmem_page_pvt *pvt;
884-
unsigned long pfn;
885884
unsigned long gfn = gpa >> page_shift;
886885
int srcu_idx;
887886
unsigned long uvmem_pfn;
@@ -901,8 +900,8 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
901900

902901
retry:
903902
mutex_unlock(&kvm->arch.uvmem_lock);
904-
pfn = gfn_to_pfn(kvm, gfn);
905-
if (is_error_noslot_pfn(pfn))
903+
page = gfn_to_page(kvm, gfn);
904+
if (!page)
906905
goto out;
907906

908907
mutex_lock(&kvm->arch.uvmem_lock);
@@ -911,16 +910,16 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
911910
pvt = uvmem_page->zone_device_data;
912911
pvt->skip_page_out = true;
913912
pvt->remove_gfn = false; /* it continues to be a valid GFN */
914-
kvm_release_pfn_clean(pfn);
913+
kvm_release_page_unused(page);
915914
goto retry;
916915
}
917916

918-
if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
917+
if (!uv_page_in(kvm->arch.lpid, page_to_pfn(page) << page_shift, gpa, 0,
919918
page_shift)) {
920919
kvmppc_gfn_shared(gfn, kvm);
921920
ret = H_SUCCESS;
922921
}
923-
kvm_release_pfn_clean(pfn);
922+
kvm_release_page_clean(page);
924923
mutex_unlock(&kvm->arch.uvmem_lock);
925924
out:
926925
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -1083,21 +1082,21 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
10831082

10841083
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
10851084
{
1086-
unsigned long pfn;
1085+
struct page *page;
10871086
int ret = U_SUCCESS;
10881087

1089-
pfn = gfn_to_pfn(kvm, gfn);
1090-
if (is_error_noslot_pfn(pfn))
1088+
page = gfn_to_page(kvm, gfn);
1089+
if (!page)
10911090
return -EFAULT;
10921091

10931092
mutex_lock(&kvm->arch.uvmem_lock);
10941093
if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
10951094
goto out;
10961095

1097-
ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
1098-
0, PAGE_SHIFT);
1096+
ret = uv_page_in(kvm->arch.lpid, page_to_pfn(page) << PAGE_SHIFT,
1097+
gfn << PAGE_SHIFT, 0, PAGE_SHIFT);
10991098
out:
1100-
kvm_release_pfn_clean(pfn);
1099+
kvm_release_page_clean(page);
11011100
mutex_unlock(&kvm->arch.uvmem_lock);
11021101
return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
11031102
}

0 commit comments

Comments (0)