  * available in the platform for running secure guests is hotplugged.
  * Whenever a page belonging to the guest becomes secure, a page from this
  * private device memory is used to represent and track that secure page
- * on the HV side.
+ * on the HV side. Some pages (like virtio buffers, VPA pages etc) are
+ * shared between UV and HV. However such pages aren't represented by
+ * device private memory and mappings to shared memory exist in both
+ * UV and HV page tables.
  */

 /*
  * UV splits and remaps the 2MB page if necessary and copies out the
  * required 64K page contents.
  *
+ * Shared pages: Whenever guest shares a secure page, UV will split and
+ * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
+ *
  * In summary, the current secure pages handling code in HV assumes
  * 64K page size and in fact fails any page-in/page-out requests of
  * non-64K size upfront. If and when UV starts supporting multiple
@@ -93,6 +99,7 @@ struct kvmppc_uvmem_slot {
 struct kvmppc_uvmem_page_pvt {
        struct kvm *kvm;
        unsigned long gpa;
+       bool skip_page_out;
 };

 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
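For orientation: the per-page state above (kvmppc_uvmem_page_pvt, now carrying skip_page_out) hangs off a device-private page's zone_device_data, and a guest frame is "secure" exactly when it is backed by such a uvmem PFN. Below is a minimal sketch of that lookup, reusing kvmppc_gfn_is_uvmem_pfn() and uvmem_lock from this file; the wrapper name is made up for illustration and is not part of the patch.

/* Illustration only, not part of the patch. */
static bool kvmppc_gfn_is_secure(struct kvm *kvm, unsigned long gfn)
{
        unsigned long uvmem_pfn;
        bool secure;

        mutex_lock(&kvm->arch.uvmem_lock);
        /* true: gfn is tracked via a device-private (uvmem) PFN, i.e. secure */
        secure = kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);

        return secure;
}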
@@ -344,8 +351,64 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
        return ret;
 }

+/*
+ * Shares the page with HV, thus making it a normal page.
+ *
+ * - If the page is already secure, then provision a new page and share
+ * - If the page is a normal page, share the existing page
+ *
+ * In the former case, uses dev_pagemap_ops.migrate_to_ram handler
+ * to unmap the device page from QEMU's page tables.
+ */
+static unsigned long
+kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
+{
+
+       int ret = H_PARAMETER;
+       struct page *uvmem_page;
+       struct kvmppc_uvmem_page_pvt *pvt;
+       unsigned long pfn;
+       unsigned long gfn = gpa >> page_shift;
+       int srcu_idx;
+       unsigned long uvmem_pfn;
+
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       mutex_lock(&kvm->arch.uvmem_lock);
+       if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+               uvmem_page = pfn_to_page(uvmem_pfn);
+               pvt = uvmem_page->zone_device_data;
+               pvt->skip_page_out = true;
+       }
+
+retry:
+       mutex_unlock(&kvm->arch.uvmem_lock);
+       pfn = gfn_to_pfn(kvm, gfn);
+       if (is_error_noslot_pfn(pfn))
+               goto out;
+
+       mutex_lock(&kvm->arch.uvmem_lock);
+       if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+               uvmem_page = pfn_to_page(uvmem_pfn);
+               pvt = uvmem_page->zone_device_data;
+               pvt->skip_page_out = true;
+               kvm_release_pfn_clean(pfn);
+               goto retry;
+       }
+
+       if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
+               ret = H_SUCCESS;
+       kvm_release_pfn_clean(pfn);
+       mutex_unlock(&kvm->arch.uvmem_lock);
+out:
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       return ret;
+}
+
 /*
  * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
+ *
+ * H_PAGE_IN_SHARED flag makes the page shared which means that the same
+ * memory is visible from both UV and HV.
  */
 unsigned long
 kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
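As the comment in the hunk above notes, this path is driven from the guest side: a secure guest marks a buffer shared with an ultracall, after which UV issues H_SVM_PAGE_IN with H_PAGE_IN_SHARED for the corresponding 64K page(s). Below is a hedged guest-side sketch, assuming the uv_share_page() wrapper from asm/ultravisor.h; the function and buffer names are made-up examples.

#include <asm/ultravisor.h>

/*
 * Guest (SVM) side sketch, illustration only: share one page holding
 * data the hypervisor must access, e.g. a VPA or a virtio buffer.
 */
static int example_share_with_hv(void *buf)
{
        return uv_share_page(PHYS_PFN(__pa(buf)), 1);
}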
@@ -364,9 +427,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
        if (page_shift != PAGE_SHIFT)
                return H_P3;

-       if (flags)
+       if (flags & ~H_PAGE_IN_SHARED)
                return H_P2;

+       if (flags & H_PAGE_IN_SHARED)
+               return kvmppc_share_page(kvm, gpa, page_shift);
+
        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        down_write(&kvm->mm->mmap_sem);
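For reference, kvmppc_h_svm_page_in() is reached from the HV's hcall dispatch in book3s_hv.c, with gpa, flags and page_shift taken from GPRs 4-6. A rough sketch of that call site follows (paraphrased for illustration, not quoted from this patch):

/* Illustration of the H_SVM_PAGE_IN dispatch; details may differ. */
static unsigned long example_dispatch_h_svm_page_in(struct kvm_vcpu *vcpu)
{
        return kvmppc_h_svm_page_in(vcpu->kvm,
                                    kvmppc_get_gpr(vcpu, 4),  /* gpa */
                                    kvmppc_get_gpr(vcpu, 5),  /* flags */
                                    kvmppc_get_gpr(vcpu, 6)); /* page_shift */
}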
@@ -411,6 +477,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *dpage, *spage;
+       struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        int ret = U_SUCCESS;

@@ -444,10 +511,20 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
        }

        lock_page(dpage);
+       pvt = spage->zone_device_data;
        pfn = page_to_pfn(dpage);

-       ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
-                         gpa, 0, page_shift);
+       /*
+        * This function is used in two cases:
+        * - When HV touches a secure page, for which we do UV_PAGE_OUT
+        * - When a secure page is converted to shared page, we *get*
+        *   the page to essentially unmap the device page. In this
+        *   case we skip page-out.
+        */
+       if (!pvt->skip_page_out)
+               ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
+                                 gpa, 0, page_shift);
+
        if (ret == U_SUCCESS)
                *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;