
Commit dfaa973

Ram Pai authored and paulusmack (Paul Mackerras) committed
KVM: PPC: Book3S HV: In H_SVM_INIT_DONE, migrate remaining normal-GFNs to secure-GFNs
The Ultravisor is expected to explicitly call H_SVM_PAGE_IN for all the pages of the SVM before calling H_SVM_INIT_DONE. This causes a huge delay in transitioning the VM to an SVM. The Ultravisor is only interested in the pages that contain the kernel, initrd and other important data structures. The rest contain throw-away content.

However, if not all pages are requested by the Ultravisor, the Hypervisor continues to consider the GFNs corresponding to the non-requested pages as normal GFNs. This can lead to data corruption and undefined behavior.

In the H_SVM_INIT_DONE handler, move all the PFNs associated with the SVM's GFNs to secure PFNs. Skip the GFNs that are already paged in, shared, or paged in and subsequently paged out.

Reviewed-by: Bharata B Rao <[email protected]>
Signed-off-by: Ram Pai <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
1 parent 651a631 commit dfaa973

File tree

2 files changed: +134 −22 lines changed

Documentation/powerpc/ultravisor.rst
arch/powerpc/kvm/book3s_hv_uvmem.c


Documentation/powerpc/ultravisor.rst

Lines changed: 2 additions & 0 deletions
@@ -934,6 +934,8 @@ Return values
 	* H_UNSUPPORTED		if called from the wrong context (e.g.
 				from an SVM or before an H_SVM_INIT_START
 				hypercall).
+	* H_STATE		if the hypervisor could not successfully
+				transition the VM to Secure VM.
 
 Description
 ~~~~~~~~~~~

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 132 additions & 22 deletions
@@ -93,6 +93,7 @@
 #include <asm/ultravisor.h>
 #include <asm/mman.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_uvmem.h>
 
 static struct dev_pagemap kvmppc_uvmem_pgmap;
 static unsigned long *kvmppc_uvmem_bitmap;
@@ -348,6 +349,41 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
 	return false;
 }
 
+/*
+ * starting from *gfn search for the next available GFN that is not yet
+ * transitioned to a secure GFN. return the value of that GFN in *gfn. If a
+ * GFN is found, return true, else return false
+ *
+ * Must be called with kvm->arch.uvmem_lock held.
+ */
+static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
+		struct kvm *kvm, unsigned long *gfn)
+{
+	struct kvmppc_uvmem_slot *p;
+	bool ret = false;
+	unsigned long i;
+
+	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
+		if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
+			break;
+	if (!p)
+		return ret;
+	/*
+	 * The code below assumes, one to one correspondence between
+	 * kvmppc_uvmem_slot and memslot.
+	 */
+	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
+		unsigned long index = i - p->base_pfn;
+
+		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
+			*gfn = i;
+			ret = true;
+			break;
+		}
+	}
+	return ret;
+}
+
 static int kvmppc_memslot_page_merge(struct kvm *kvm,
 		const struct kvm_memory_slot *memslot, bool merge)
 {
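The scan added above walks the per-GFN state words of a kvmppc_uvmem_slot and stops at the first entry with no transition flag set. A minimal standalone sketch of the same idea follows; the names here (uvmem_slot_model, GFN_FLAG_MASK_MODEL, next_nontransitioned_gfn) are illustrative stand-ins rather than the kernel's definitions, and the memslot lookup and uvmem_lock handling of the real function are omitted.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-GFN state words of one slot. */
#define GFN_FLAG_MASK_MODEL 0x3UL	/* any set bit: GFN already transitioned */

struct uvmem_slot_model {
	unsigned long base_gfn;
	unsigned long nr_gfns;
	unsigned long *flags;		/* one state word per GFN */
};

/* Return true and update *gfn if a not-yet-transitioned GFN >= *gfn exists. */
static bool next_nontransitioned_gfn(const struct uvmem_slot_model *s,
				     unsigned long *gfn)
{
	unsigned long i;

	for (i = *gfn; i < s->base_gfn + s->nr_gfns; i++) {
		if (!(s->flags[i - s->base_gfn] & GFN_FLAG_MASK_MODEL)) {
			*gfn = i;
			return true;
		}
	}
	return false;
}

int main(void)
{
	unsigned long flags[4] = { 0x1, 0x0, 0x2, 0x0 };	/* GFNs 0 and 2 already secure */
	struct uvmem_slot_model slot = { .base_gfn = 0, .nr_gfns = 4, .flags = flags };
	unsigned long gfn = 0;

	while (next_nontransitioned_gfn(&slot, &gfn)) {
		printf("GFN %lu still needs to be migrated\n", gfn);
		gfn++;	/* resume the scan past the GFN just handled */
	}
	return 0;
}

Note that in the kernel sweep further below, gfn is not advanced explicitly in the loop; presumably the page-in path sets a flag in the GFN's state word, so the next call to kvmppc_next_nontransitioned_gfn naturally moves past it.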
@@ -460,16 +496,6 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 	return ret;
 }
 
-unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
-{
-	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
-		return H_UNSUPPORTED;
-
-	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
-	pr_info("LPID %d went secure\n", kvm->arch.lpid);
-	return H_SUCCESS;
-}
-
 /*
  * Drop device pages that we maintain for the secure guest
  *
@@ -588,12 +614,14 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 }
 
 /*
- * Alloc a PFN from private device memory pool and copy page from normal
- * memory to secure memory using UV_PAGE_IN uvcall.
+ * Alloc a PFN from private device memory pool. If @pagein is true,
+ * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
  */
-static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
-		unsigned long end, unsigned long gpa, struct kvm *kvm,
-		unsigned long page_shift)
+static int kvmppc_svm_page_in(struct vm_area_struct *vma,
+		unsigned long start,
+		unsigned long end, unsigned long gpa, struct kvm *kvm,
+		unsigned long page_shift,
+		bool pagein)
 {
 	unsigned long src_pfn, dst_pfn = 0;
 	struct migrate_vma mig;
@@ -624,11 +652,16 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 		goto out_finalize;
 	}
 
-	pfn = *mig.src >> MIGRATE_PFN_SHIFT;
-	spage = migrate_pfn_to_page(*mig.src);
-	if (spage)
-		uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
-			   page_shift);
+	if (pagein) {
+		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
+		spage = migrate_pfn_to_page(*mig.src);
+		if (spage) {
+			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
+					gpa, 0, page_shift);
+			if (ret)
+				goto out_finalize;
+		}
+	}
 
 	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 	migrate_vma_pages(&mig);
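The new @pagein flag lets the same migration helper serve both paths: the explicit H_SVM_PAGE_IN path copies the page contents into secure memory via the UV_PAGE_IN ultracall, while the H_SVM_INIT_DONE sweep, which handles pages whose contents are throw-away per the commit message, only switches the backing frame. A toy model of that distinction is sketched below; migrate_one_page is a made-up name and the printf calls merely stand in for the real ultracall and migration work.

#include <stdbool.h>
#include <stdio.h>

/* Model of the @pagein split: copy contents only when they matter. */
static int migrate_one_page(unsigned long gfn, bool pagein)
{
	if (pagein) {
		/* explicit H_SVM_PAGE_IN: contents are copied into secure memory */
		printf("GFN %lu: copy contents into secure memory\n", gfn);
	} else {
		/* H_SVM_INIT_DONE sweep: contents are throw-away, switch frame only */
		printf("GFN %lu: switch backing frame, no copy\n", gfn);
	}
	return 0;
}

int main(void)
{
	migrate_one_page(42, true);	/* page requested by the Ultravisor */
	migrate_one_page(43, false);	/* leftover page swept at init-done time */
	return 0;
}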
@@ -637,6 +670,80 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 	return ret;
 }
 
+static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
+		const struct kvm_memory_slot *memslot)
+{
+	unsigned long gfn = memslot->base_gfn;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+	int ret = 0;
+
+	mmap_read_lock(kvm->mm);
+	mutex_lock(&kvm->arch.uvmem_lock);
+	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
+		ret = H_STATE;
+		start = gfn_to_hva(kvm, gfn);
+		if (kvm_is_error_hva(start))
+			break;
+
+		end = start + (1UL << PAGE_SHIFT);
+		vma = find_vma_intersection(kvm->mm, start, end);
+		if (!vma || vma->vm_start > start || vma->vm_end < end)
+			break;
+
+		ret = kvmppc_svm_page_in(vma, start, end,
+				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
+		if (ret) {
+			ret = H_STATE;
+			break;
+		}
+
+		/* relinquish the cpu if needed */
+		cond_resched();
+	}
+	mutex_unlock(&kvm->arch.uvmem_lock);
+	mmap_read_unlock(kvm->mm);
+	return ret;
+}
+
+unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int srcu_idx;
+	long ret = H_SUCCESS;
+
+	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+		return H_UNSUPPORTED;
+
+	/* migrate any unmoved normal pfn to device pfns*/
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots) {
+		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
+		if (ret) {
+			/*
+			 * The pages will remain transitioned.
+			 * Its the callers responsibility to
+			 * terminate the VM, which will undo
+			 * all state of the VM. Till then
+			 * this VM is in a erroneous state.
+			 * Its KVMPPC_SECURE_INIT_DONE will
+			 * remain unset.
+			 */
+			ret = H_STATE;
+			goto out;
+		}
+	}
+
+	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
+	pr_info("LPID %d went secure\n", kvm->arch.lpid);
+
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return ret;
+}
+
 /*
  * Shares the page with HV, thus making it a normal page.
  *
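Putting the pieces together, the new kvmppc_h_svm_init_done walks every memslot and migrates each remaining normal GFN before setting KVMPPC_SECURE_INIT_DONE; any failure surfaces as H_STATE, matching the documentation change above. The following standalone sketch models that sweep with simplified types; memslot_model, H_STATE_MODEL and the helper names are hypothetical stand-ins, and the SRCU, mmap and uvmem locking of the real code is omitted.

#include <stdbool.h>
#include <stdio.h>

#define H_SUCCESS_MODEL 0
#define H_STATE_MODEL   1

struct memslot_model {
	unsigned long base_gfn;
	unsigned long nr_gfns;
	unsigned long *flags;	/* non-zero: GFN already transitioned (paged in, shared, ...) */
};

/* Stand-in for migrating one leftover GFN without copying its contents. */
static int migrate_gfn_no_copy(unsigned long gfn)
{
	printf("migrating GFN %lu to a secure frame\n", gfn);
	return 0;
}

/* Sweep a slot: every GFN must end up secure, otherwise report a state error. */
static int migrate_mem_slot(const struct memslot_model *slot)
{
	unsigned long i;

	for (i = 0; i < slot->nr_gfns; i++) {
		if (slot->flags[i])		/* skip GFNs the Ultravisor already handled */
			continue;
		if (migrate_gfn_no_copy(slot->base_gfn + i))
			return H_STATE_MODEL;	/* caller must terminate the VM */
	}
	return H_SUCCESS_MODEL;
}

int main(void)
{
	unsigned long flags[3] = { 1, 0, 0 };
	struct memslot_model slot = { .base_gfn = 100, .nr_gfns = 3, .flags = flags };

	if (migrate_mem_slot(&slot) == H_SUCCESS_MODEL)
		printf("all GFNs secure: the guest can be marked INIT_DONE\n");
	return 0;
}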
@@ -745,8 +852,11 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	if (!vma || vma->vm_start > start || vma->vm_end < end)
 		goto out_unlock;
 
-	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift))
-		ret = H_SUCCESS;
+	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
+				true))
+		goto out_unlock;
+
+	ret = H_SUCCESS;
 
 out_unlock:
 	mutex_unlock(&kvm->arch.uvmem_lock);