#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
+ #include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
@@ -348,6 +349,41 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
	return false;
}

+ /*
+  * Starting from *gfn, search for the next available GFN that is not yet
+  * transitioned to a secure GFN. Return the value of that GFN in *gfn. If a
+  * GFN is found, return true, else return false.
+  *
+  * Must be called with kvm->arch.uvmem_lock held.
+  */
+ static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
+ 		struct kvm *kvm, unsigned long *gfn)
+ {
+ 	struct kvmppc_uvmem_slot *p = NULL, *iter;
+ 	bool ret = false;
+ 	unsigned long i;
+
+ 	/* Find the kvmppc_uvmem_slot covering *gfn; the list cursor itself is
+ 	 * never NULL after the loop, so record the match explicitly. */
+ 	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
+ 		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
+ 			p = iter;
+ 			break;
+ 		}
+ 	if (!p)
+ 		return ret;
+ 	/*
+ 	 * The code below assumes a one-to-one correspondence between
+ 	 * kvmppc_uvmem_slot and memslot.
+ 	 */
+ 	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
+ 		unsigned long index = i - p->base_pfn;
+
+ 		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
+ 			*gfn = i;
+ 			ret = true;
+ 			break;
+ 		}
+ 	}
+ 	return ret;
+ }
+
static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
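The pfns[] test in kvmppc_next_nontransitioned_gfn() only looks at the per-GFN state flags. Those flags are defined earlier in book3s_hv_uvmem.c; the sketch below shows the layout assumed here (names and bit positions are reproduced as an assumption and worth checking against the file):

/* Assumed per-GFN state flags behind KVMPPC_GFN_FLAG_MASK; a GFN whose entry
 * has none of these bits set has not yet been transitioned, which is what
 * kvmppc_next_nontransitioned_gfn() looks for. */
#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)	/* GFN backed by a secure (device) PFN */
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)	/* GFN backed by a normal memory PFN */
#define KVMPPC_GFN_SHARED	(1UL << 61)	/* GFN shared between HV and SVM */
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)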
@@ -460,16 +496,6 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
	return ret;
}

- unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
- {
- 	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
- 		return H_UNSUPPORTED;
-
- 	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
- 	pr_info("LPID %d went secure\n", kvm->arch.lpid);
- 	return H_SUCCESS;
- }
-
/*
 * Drop device pages that we maintain for the secure guest
 *
@@ -588,12 +614,14 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
}

/*
-  * Alloc a PFN from private device memory pool and copy page from normal
-  * memory to secure memory using UV_PAGE_IN uvcall.
+  * Alloc a PFN from private device memory pool. If @pagein is true,
+  * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
 */
- static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
- 		unsigned long end, unsigned long gpa, struct kvm *kvm,
- 		unsigned long page_shift)
+ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
+ 		unsigned long start,
+ 		unsigned long end, unsigned long gpa, struct kvm *kvm,
+ 		unsigned long page_shift,
+ 		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
@@ -624,11 +652,16 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
		goto out_finalize;
	}

- 	pfn = *mig.src >> MIGRATE_PFN_SHIFT;
- 	spage = migrate_pfn_to_page(*mig.src);
- 	if (spage)
- 		uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
- 			   page_shift);
+ 	if (pagein) {
+ 		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
+ 		spage = migrate_pfn_to_page(*mig.src);
+ 		if (spage) {
+ 			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
+ 					gpa, 0, page_shift);
+ 			if (ret)
+ 				goto out_finalize;
+ 		}
+ 	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
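For reference, the two call modes of kvmppc_svm_page_in() that this patch ends up using look roughly like the sketch below (arguments abbreviated to those visible in the surrounding hunks):

	/* H_SVM_PAGE_IN path: allocate a device PFN and copy the page into
	 * secure memory via the UV_PAGE_IN ultracall. */
	ret = kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, true);

	/* H_SVM_INIT_DONE bulk-migration path: allocate the device PFN but
	 * skip the UV_PAGE_IN content copy. */
	ret = kvmppc_svm_page_in(vma, start, end, gfn << PAGE_SHIFT, kvm, PAGE_SHIFT, false);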
@@ -637,6 +670,80 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
	return ret;
}

+ static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
+ 		const struct kvm_memory_slot *memslot)
+ {
+ 	unsigned long gfn = memslot->base_gfn;
+ 	struct vm_area_struct *vma;
+ 	unsigned long start, end;
+ 	int ret = 0;
+
+ 	mmap_read_lock(kvm->mm);
+ 	mutex_lock(&kvm->arch.uvmem_lock);
+ 	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
+ 		ret = H_STATE;
+ 		start = gfn_to_hva(kvm, gfn);
+ 		if (kvm_is_error_hva(start))
+ 			break;
+
+ 		end = start + (1UL << PAGE_SHIFT);
+ 		vma = find_vma_intersection(kvm->mm, start, end);
+ 		if (!vma || vma->vm_start > start || vma->vm_end < end)
+ 			break;
+
+ 		ret = kvmppc_svm_page_in(vma, start, end,
+ 				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
+ 		if (ret) {
+ 			ret = H_STATE;
+ 			break;
+ 		}
+
+ 		/* relinquish the cpu if needed */
+ 		cond_resched();
+ 	}
+ 	mutex_unlock(&kvm->arch.uvmem_lock);
+ 	mmap_read_unlock(kvm->mm);
+ 	return ret;
+ }
+
+ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+ {
+ 	struct kvm_memslots *slots;
+ 	struct kvm_memory_slot *memslot;
+ 	int srcu_idx;
+ 	long ret = H_SUCCESS;
+
+ 	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+ 		return H_UNSUPPORTED;
+
+ 	/* migrate any unmoved normal pfns to device pfns */
+ 	srcu_idx = srcu_read_lock(&kvm->srcu);
+ 	slots = kvm_memslots(kvm);
+ 	kvm_for_each_memslot(memslot, slots) {
+ 		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
+ 		if (ret) {
+ 			/*
+ 			 * The pages will remain transitioned.
+ 			 * It's the caller's responsibility to
+ 			 * terminate the VM, which will undo
+ 			 * all state of the VM. Until then
+ 			 * this VM is in an erroneous state.
+ 			 * Its KVMPPC_SECURE_INIT_DONE will
+ 			 * remain unset.
+ 			 */
+ 			ret = H_STATE;
+ 			goto out;
+ 		}
+ 	}
+
+ 	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
+ 	pr_info("LPID %d went secure\n", kvm->arch.lpid);
+
+ out:
+ 	srcu_read_unlock(&kvm->srcu, srcu_idx);
+ 	return ret;
+ }
+
/*
 * Shares the page with HV, thus making it a normal page.
 *
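For context, the kvmppc_h_svm_init_done() added above is reached from the hcall dispatcher when the guest issues H_SVM_INIT_DONE; a minimal sketch of that dispatch is shown below (assumed to live in kvmppc_pseries_do_hcall() in arch/powerpc/kvm/book3s_hv.c; the MSR_S guard comes from a separate change and is shown here as an assumption):

	case H_SVM_INIT_DONE:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)	/* only a secure guest may call this */
			ret = kvmppc_h_svm_init_done(vcpu->kvm);
		break;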
@@ -745,8 +852,11 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

- 	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift))
- 		ret = H_SUCCESS;
+ 	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
+ 				true))
+ 		goto out_unlock;
+
+ 	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);