Commit 008e359

Bharata B Rao authored and paulusmack committed
KVM: PPC: Book3S HV: Radix changes for secure guest
- After the guest becomes secure, when we handle a page fault of a page
  belonging to SVM in HV, send that page to UV via UV_PAGE_IN.
- Whenever a page is unmapped on the HV side, inform UV via UV_PAGE_INVAL.
- Ensure all those routines that walk the secondary page tables of the
  guest don't do so in case of secure VM. For secure guest, the active
  secondary page tables are in secure memory and the secondary page
  tables in HV are freed when guest becomes secure.

Signed-off-by: Bharata B Rao <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
1 parent 60f0a64 commit 008e359
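The guard added by this patch is the same in every radix MMU walker it touches: once the guest has finished its transition to secure (KVMPPC_SECURE_INIT_DONE), HV no longer walks or updates its own secondary page tables and instead either pages the frame into the Ultravisor (fault path) or notifies the Ultravisor that the page went away (unmap path). A minimal standalone C sketch of that dispatch follows; it is illustrative only, not kernel code, and the toy_* names and the flag value are assumptions made for the example.

/*
 * Standalone model of the secure-guest guard pattern (not kernel code).
 * Only KVMPPC_SECURE_INIT_DONE, UV_PAGE_IN and UV_PAGE_INVAL come from
 * the commit; everything else is a hypothetical simplification.
 */
#include <stdio.h>

#define KVMPPC_SECURE_INIT_DONE 0x2	/* assumed flag value for this model */

struct toy_kvm {
	unsigned long secure_guest;	/* bitmask of secure-transition state */
};

/* Stand-in for a UV_PAGE_IN ucall: HV hands the faulting GFN to UV. */
static int toy_send_page_to_uv(unsigned long gfn)
{
	printf("UV_PAGE_IN    gfn=%lu\n", gfn);
	return 0;
}

/* Stand-in for a UV_PAGE_INVAL ucall: HV tells UV the page was unmapped. */
static void toy_page_inval(unsigned long gfn)
{
	printf("UV_PAGE_INVAL gfn=%lu\n", gfn);
}

/* Fault path: a secure guest never reaches the HV page-table insert. */
static int toy_page_fault(struct toy_kvm *kvm, unsigned long gfn)
{
	if (kvm->secure_guest & KVMPPC_SECURE_INIT_DONE)
		return toy_send_page_to_uv(gfn);
	printf("HV maps gfn=%lu in its own radix tree\n", gfn);
	return 0;
}

/* Unmap path: a secure guest only gets a UV notification, no HV PTE walk. */
static void toy_unmap(struct toy_kvm *kvm, unsigned long gfn)
{
	if (kvm->secure_guest & KVMPPC_SECURE_INIT_DONE) {
		toy_page_inval(gfn);
		return;
	}
	printf("HV clears its PTE for gfn=%lu\n", gfn);
}

int main(void)
{
	struct toy_kvm normal = { 0 };
	struct toy_kvm secure = { KVMPPC_SECURE_INIT_DONE };

	toy_page_fault(&normal, 1);
	toy_page_fault(&secure, 1);
	toy_unmap(&secure, 1);
	return 0;
}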

File tree

arch/powerpc/include/asm/kvm_book3s_uvmem.h
arch/powerpc/include/asm/ultravisor-api.h
arch/powerpc/include/asm/ultravisor.h
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv_uvmem.c

5 files changed: 66 additions, 0 deletions

arch/powerpc/include/asm/kvm_book3s_uvmem.h

Lines changed: 6 additions & 0 deletions
@@ -18,6 +18,7 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
 			     unsigned long page_shift);
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -58,5 +59,10 @@ static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
 {
 	return H_UNSUPPORTED;
 }
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
+{
+	return -EFAULT;
+}
 #endif /* CONFIG_PPC_UV */
 #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */

arch/powerpc/include/asm/ultravisor-api.h

Lines changed: 1 addition & 0 deletions
@@ -32,5 +32,6 @@
 #define UV_SHARE_PAGE			0xF130
 #define UV_UNSHARE_PAGE			0xF134
 #define UV_UNSHARE_ALL_PAGES		0xF140
+#define UV_PAGE_INVAL			0xF138

 #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */

arch/powerpc/include/asm/ultravisor.h

Lines changed: 5 additions & 0 deletions
@@ -67,4 +67,9 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
 			    size, flags, slotid);
 }

+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+	return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
+}
+
 #endif /* _ASM_POWERPC_ULTRAVISOR_H */

arch/powerpc/kvm/book3s_64_mmu_radix.c

Lines changed: 22 additions & 0 deletions
@@ -19,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_book3s_uvmem.h>

 /*
  * Supported radix tree geometry.
@@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (!(dsisr & DSISR_PRTABLE_FAULT))
 		gpa |= ea & 0xfff;

+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return kvmppc_send_page_to_uv(kvm, gfn);
+
 	/* Get the corresponding memslot */
 	memslot = gfn_to_memslot(kvm, gfn);

@@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	unsigned long gpa = gfn << PAGE_SHIFT;
 	unsigned int shift;

+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
+		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+		return 0;
+	}
+
 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (ptep && pte_present(*ptep))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
@@ -989,6 +999,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	int ref = 0;
 	unsigned long old, *rmapp;

+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return ref;
+
 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
@@ -1013,6 +1026,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	unsigned int shift;
 	int ref = 0;

+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return ref;
+
 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
 		ref = 1;
@@ -1030,6 +1046,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 	int ret = 0;
 	unsigned long old, *rmapp;

+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return ret;
+
 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
 		ret = 1;
@@ -1082,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	unsigned long gpa;
 	unsigned int shift;

+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return;
+
 	gpa = memslot->base_gfn << PAGE_SHIFT;
 	spin_lock(&kvm->mmu_lock);
 	for (n = memslot->npages; n; --n) {

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 32 additions & 0 deletions
@@ -69,6 +69,17 @@
  * Shared pages: Whenever guest shares a secure page, UV will split and
  * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
  *
+ * HV invalidating a page: When a regular page belonging to secure
+ * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
+ * page size. Using 64K page size is correct here because any non-secure
+ * page will essentially be of 64K page size. Splitting by UV during sharing
+ * and page-out ensures this.
+ *
+ * Page fault handling: When HV handles page fault of a page belonging
+ * to secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
+ * Using 64K size is correct here too as UV would have split the 2MB page
+ * into 64k mappings and would have done page-outs earlier.
+ *
  * In summary, the current secure pages handling code in HV assumes
  * 64K page size and in fact fails any page-in/page-out requests of
  * non-64K size upfront. If and when UV starts supporting multiple
@@ -630,6 +641,27 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
 	return ret;
 }

+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
+{
+	unsigned long pfn;
+	int ret = U_SUCCESS;
+
+	pfn = gfn_to_pfn(kvm, gfn);
+	if (is_error_noslot_pfn(pfn))
+		return -EFAULT;
+
+	mutex_lock(&kvm->arch.uvmem_lock);
+	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
+		goto out;
+
+	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
+			 0, PAGE_SHIFT);
+out:
+	kvm_release_pfn_clean(pfn);
+	mutex_unlock(&kvm->arch.uvmem_lock);
+	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
+}
+
 static u64 kvmppc_get_secmem_size(void)
 {
 	struct device_node *np;
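The 64K assumption discussed in the comment block above also drives the shift arithmetic in kvmppc_send_page_to_uv(): gfn and pfn are converted to guest and host physical addresses with PAGE_SHIFT, and PAGE_SHIFT itself is passed to UV as the page size. A small userspace sketch of that arithmetic, assuming 64K pages (PAGE_SHIFT == 16); the gfn/pfn values are made up:

/*
 * Worked example of the address arithmetic used around uv_page_in()
 * (illustration only, not kernel code; 64K base page size assumed).
 */
#include <stdio.h>

#define TOY_PAGE_SHIFT 16UL			/* 64K pages, as the HV code assumes */

int main(void)
{
	unsigned long gfn = 0x123;		/* guest frame number from the fault */
	unsigned long pfn = 0x4567;		/* host frame number from gfn_to_pfn() */

	unsigned long gpa = gfn << TOY_PAGE_SHIFT;	/* guest physical address for UV */
	unsigned long hpa = pfn << TOY_PAGE_SHIFT;	/* host physical address paged in */

	/* uv_page_in(lpid, hpa, gpa, 0, page_shift) would receive these values: */
	printf("gpa = 0x%lx, hpa = 0x%lx, page size = %lu bytes\n",
	       gpa, hpa, 1UL << TOY_PAGE_SHIFT);
	return 0;
}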
