Skip to content

Commit 60f0a64

Browse files
Bharata B Rao authored and paulusmack committed
KVM: PPC: Book3S HV: Shared pages support for secure guests
A secure guest will share some of its pages with the hypervisor (e.g. virtio bounce buffers). Support sharing of pages between the hypervisor and the ultravisor. A shared page is reachable via both HV and UV side page tables. Once a secure page is converted to a shared page, the device page that represents the secure page is unmapped from the HV side page tables. Signed-off-by: Bharata B Rao <[email protected]> Signed-off-by: Paul Mackerras <[email protected]>
1 parent ca9f494 commit 60f0a64

File tree

2 files changed

+84
-4
lines changed

2 files changed

+84
-4
lines changed

arch/powerpc/include/asm/hvcall.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -342,6 +342,9 @@
342342
#define H_TLB_INVALIDATE 0xF808
343343
#define H_COPY_TOFROM_GUEST 0xF80C
344344

345+
/* Flags for H_SVM_PAGE_IN */
346+
#define H_PAGE_IN_SHARED 0x1
347+
345348
/* Platform-specific hcalls used by the Ultravisor */
346349
#define H_SVM_PAGE_IN 0xEF00
347350
#define H_SVM_PAGE_OUT 0xEF04

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 81 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,10 @@
1919
* available in the platform for running secure guests is hotplugged.
2020
* Whenever a page belonging to the guest becomes secure, a page from this
2121
* private device memory is used to represent and track that secure page
22-
* on the HV side.
22+
* on the HV side. Some pages (like virtio buffers, VPA pages etc) are
23+
* shared between UV and HV. However such pages aren't represented by
24+
* device private memory and mappings to shared memory exist in both
25+
* UV and HV page tables.
2326
*/
2427

2528
/*
@@ -63,6 +66,9 @@
6366
* UV splits and remaps the 2MB page if necessary and copies out the
6467
* required 64K page contents.
6568
*
69+
* Shared pages: Whenever guest shares a secure page, UV will split and
70+
* remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
71+
*
6672
* In summary, the current secure pages handling code in HV assumes
6773
* 64K page size and in fact fails any page-in/page-out requests of
6874
* non-64K size upfront. If and when UV starts supporting multiple
@@ -93,6 +99,7 @@ struct kvmppc_uvmem_slot {
9399
struct kvmppc_uvmem_page_pvt {
94100
struct kvm *kvm;
95101
unsigned long gpa;
102+
bool skip_page_out;
96103
};
97104

98105
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
@@ -344,8 +351,64 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
344351
return ret;
345352
}
346353

354+
/*
355+
* Shares the page with HV, thus making it a normal page.
356+
*
357+
* - If the page is already secure, then provision a new page and share
358+
* - If the page is a normal page, share the existing page
359+
*
360+
* In the former case, uses dev_pagemap_ops.migrate_to_ram handler
361+
* to unmap the device page from QEMU's page tables.
362+
*/
363+
static unsigned long
364+
kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
365+
{
366+
367+
int ret = H_PARAMETER;
368+
struct page *uvmem_page;
369+
struct kvmppc_uvmem_page_pvt *pvt;
370+
unsigned long pfn;
371+
unsigned long gfn = gpa >> page_shift;
372+
int srcu_idx;
373+
unsigned long uvmem_pfn;
374+
375+
srcu_idx = srcu_read_lock(&kvm->srcu);
376+
mutex_lock(&kvm->arch.uvmem_lock);
377+
if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
378+
uvmem_page = pfn_to_page(uvmem_pfn);
379+
pvt = uvmem_page->zone_device_data;
380+
pvt->skip_page_out = true;
381+
}
382+
383+
retry:
384+
mutex_unlock(&kvm->arch.uvmem_lock);
385+
pfn = gfn_to_pfn(kvm, gfn);
386+
if (is_error_noslot_pfn(pfn))
387+
goto out;
388+
389+
mutex_lock(&kvm->arch.uvmem_lock);
390+
if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
391+
uvmem_page = pfn_to_page(uvmem_pfn);
392+
pvt = uvmem_page->zone_device_data;
393+
pvt->skip_page_out = true;
394+
kvm_release_pfn_clean(pfn);
395+
goto retry;
396+
}
397+
398+
if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
399+
ret = H_SUCCESS;
400+
kvm_release_pfn_clean(pfn);
401+
mutex_unlock(&kvm->arch.uvmem_lock);
402+
out:
403+
srcu_read_unlock(&kvm->srcu, srcu_idx);
404+
return ret;
405+
}
406+
347407
/*
348408
* H_SVM_PAGE_IN: Move page from normal memory to secure memory.
409+
*
410+
* H_PAGE_IN_SHARED flag makes the page shared which means that the same
411+
* memory in is visible from both UV and HV.
349412
*/
350413
unsigned long
351414
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
@@ -364,9 +427,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
364427
if (page_shift != PAGE_SHIFT)
365428
return H_P3;
366429

367-
if (flags)
430+
if (flags & ~H_PAGE_IN_SHARED)
368431
return H_P2;
369432

433+
if (flags & H_PAGE_IN_SHARED)
434+
return kvmppc_share_page(kvm, gpa, page_shift);
435+
370436
ret = H_PARAMETER;
371437
srcu_idx = srcu_read_lock(&kvm->srcu);
372438
down_write(&kvm->mm->mmap_sem);
@@ -411,6 +477,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
411477
unsigned long src_pfn, dst_pfn = 0;
412478
struct migrate_vma mig;
413479
struct page *dpage, *spage;
480+
struct kvmppc_uvmem_page_pvt *pvt;
414481
unsigned long pfn;
415482
int ret = U_SUCCESS;
416483

@@ -444,10 +511,20 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
444511
}
445512

446513
lock_page(dpage);
514+
pvt = spage->zone_device_data;
447515
pfn = page_to_pfn(dpage);
448516

449-
ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
450-
gpa, 0, page_shift);
517+
/*
518+
* This function is used in two cases:
519+
* - When HV touches a secure page, for which we do UV_PAGE_OUT
520+
* - When a secure page is converted to shared page, we *get*
521+
* the page to essentially unmap the device page. In this
522+
* case we skip page-out.
523+
*/
524+
if (!pvt->skip_page_out)
525+
ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
526+
gpa, 0, page_shift);
527+
451528
if (ret == U_SUCCESS)
452529
*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
453530
else {

0 commit comments

Comments
 (0)