Commit f1b87ea

ldu4 authored and paulusmack committed
KVM: PPC: Book3S HV: Move kvmppc_svm_page_out up
kvmppc_svm_page_out() will need to be called by kvmppc_uvmem_drop_pages(), so move it up earlier in this file.

Furthermore, it will be useful to call this function while already holding the kvm->arch.uvmem_lock, so prefix the original function with __ and remove the locking in it, and introduce a wrapper which calls that function with the lock held.

There is no functional change.

Reviewed-by: Bharata B Rao <[email protected]>
Signed-off-by: Laurent Dufour <[email protected]>
Signed-off-by: Ram Pai <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
1 parent a2ce720 commit f1b87ea
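The commit applies the common locked-helper convention: the double-underscored variant assumes the caller already holds kvm->arch.uvmem_lock, while a thin wrapper acquires and releases the lock for callers that do not. Below is a minimal, self-contained sketch of that pattern, using POSIX threads instead of the kernel mutex API; counter, counter_lock, __counter_add and counter_add are illustrative names, not from the commit:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static long counter;

/* Caller must hold counter_lock (plays the role of __kvmppc_svm_page_out). */
static long __counter_add(long delta)
{
        counter += delta;       /* shared state touched only under the lock */
        return counter;
}

/* Wrapper that takes the lock (plays the role of kvmppc_svm_page_out). */
static long counter_add(long delta)
{
        long ret;

        pthread_mutex_lock(&counter_lock);
        ret = __counter_add(delta);
        pthread_mutex_unlock(&counter_lock);

        return ret;
}

int main(void)
{
        /* A caller already inside a critical section uses the __ variant. */
        pthread_mutex_lock(&counter_lock);
        long a = __counter_add(1);
        pthread_mutex_unlock(&counter_lock);

        /* Everyone else goes through the locking wrapper. */
        long b = counter_add(2);

        printf("%ld %ld\n", a, b);      /* prints "1 3" */
        return 0;
}

The point of the split is that a function such as kvmppc_uvmem_drop_pages(), which already holds the (non-recursive) mutex, can call the __ variant directly without deadlocking, while existing callers keep the original locking behaviour through the wrapper.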

File tree

1 file changed: +90 -76 lines changed

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 90 additions & 76 deletions
@@ -496,6 +496,96 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 	return ret;
 }
 
+/*
+ * Provision a new page on HV side and copy over the contents
+ * from secure memory using UV_PAGE_OUT uvcall.
+ * Caller must hold kvm->arch.uvmem_lock.
+ */
+static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
+		unsigned long start,
+		unsigned long end, unsigned long page_shift,
+		struct kvm *kvm, unsigned long gpa)
+{
+	unsigned long src_pfn, dst_pfn = 0;
+	struct migrate_vma mig;
+	struct page *dpage, *spage;
+	struct kvmppc_uvmem_page_pvt *pvt;
+	unsigned long pfn;
+	int ret = U_SUCCESS;
+
+	memset(&mig, 0, sizeof(mig));
+	mig.vma = vma;
+	mig.start = start;
+	mig.end = end;
+	mig.src = &src_pfn;
+	mig.dst = &dst_pfn;
+	mig.src_owner = &kvmppc_uvmem_pgmap;
+
+	/* The requested page is already paged-out, nothing to do */
+	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
+		return ret;
+
+	ret = migrate_vma_setup(&mig);
+	if (ret)
+		return -1;
+
+	spage = migrate_pfn_to_page(*mig.src);
+	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
+		goto out_finalize;
+
+	if (!is_zone_device_page(spage))
+		goto out_finalize;
+
+	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
+	if (!dpage) {
+		ret = -1;
+		goto out_finalize;
+	}
+
+	lock_page(dpage);
+	pvt = spage->zone_device_data;
+	pfn = page_to_pfn(dpage);
+
+	/*
+	 * This function is used in two cases:
+	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
+	 * - When a secure page is converted to shared page, we *get*
+	 *   the page to essentially unmap the device page. In this
+	 *   case we skip page-out.
+	 */
+	if (!pvt->skip_page_out)
+		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
+				  gpa, 0, page_shift);
+
+	if (ret == U_SUCCESS)
+		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+	else {
+		unlock_page(dpage);
+		__free_page(dpage);
+		goto out_finalize;
+	}
+
+	migrate_vma_pages(&mig);
+
+out_finalize:
+	migrate_vma_finalize(&mig);
+	return ret;
+}
+
+static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		unsigned long page_shift,
+		struct kvm *kvm, unsigned long gpa)
+{
+	int ret;
+
+	mutex_lock(&kvm->arch.uvmem_lock);
+	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
+	mutex_unlock(&kvm->arch.uvmem_lock);
+
+	return ret;
+}
+
 /*
  * Drop device pages that we maintain for the secure guest
  *
@@ -866,82 +956,6 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	return ret;
 }
 
-/*
- * Provision a new page on HV side and copy over the contents
- * from secure memory using UV_PAGE_OUT uvcall.
- */
-static int kvmppc_svm_page_out(struct vm_area_struct *vma,
-		unsigned long start,
-		unsigned long end, unsigned long page_shift,
-		struct kvm *kvm, unsigned long gpa)
-{
-	unsigned long src_pfn, dst_pfn = 0;
-	struct migrate_vma mig;
-	struct page *dpage, *spage;
-	struct kvmppc_uvmem_page_pvt *pvt;
-	unsigned long pfn;
-	int ret = U_SUCCESS;
-
-	memset(&mig, 0, sizeof(mig));
-	mig.vma = vma;
-	mig.start = start;
-	mig.end = end;
-	mig.src = &src_pfn;
-	mig.dst = &dst_pfn;
-	mig.src_owner = &kvmppc_uvmem_pgmap;
-
-	mutex_lock(&kvm->arch.uvmem_lock);
-	/* The requested page is already paged-out, nothing to do */
-	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
-		goto out;
-
-	ret = migrate_vma_setup(&mig);
-	if (ret)
-		goto out;
-
-	spage = migrate_pfn_to_page(*mig.src);
-	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
-		goto out_finalize;
-
-	if (!is_zone_device_page(spage))
-		goto out_finalize;
-
-	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
-	if (!dpage) {
-		ret = -1;
-		goto out_finalize;
-	}
-
-	lock_page(dpage);
-	pvt = spage->zone_device_data;
-	pfn = page_to_pfn(dpage);
-
-	/*
-	 * This function is used in two cases:
-	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
-	 * - When a secure page is converted to shared page, we *get*
-	 *   the page to essentially unmap the device page. In this
-	 *   case we skip page-out.
-	 */
-	if (!pvt->skip_page_out)
-		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
-				  gpa, 0, page_shift);
-
-	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
-	else {
-		unlock_page(dpage);
-		__free_page(dpage);
-		goto out_finalize;
-	}
-
-	migrate_vma_pages(&mig);
-out_finalize:
-	migrate_vma_finalize(&mig);
-out:
-	mutex_unlock(&kvm->arch.uvmem_lock);
-	return ret;
-}
 
 /*
  * Fault handler callback that gets called when HV touches any page that
