
Commit 2027a24

Ram Pai authored and paulusmack committed
KVM: PPC: Book3S HV: Disable page merging in H_SVM_INIT_START
Page-merging of pages in memory-slots associated with a Secure VM is disabled in the H_SVM_PAGE_IN handler.

This operation should have been done much earlier, at the moment the VM is initiated for secure-transition. Delaying it increases the probability of those pages acquiring new references, making it impossible to migrate them in the H_SVM_PAGE_IN handler.

Disable page-merging in H_SVM_INIT_START handling instead.

Reviewed-by: Bharata B Rao <[email protected]>
Signed-off-by: Ram Pai <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
1 parent 48908a3 commit 2027a24
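
Note for readers: the heart of the change is the new kvmppc_memslot_page_merge() helper added in the diff below, which walks every VMA intersecting a memslot's host address range under the mmap write lock and toggles KSM merging via ksm_madvise(). The sketch below restates that logic outside the diff markup for readability; the function name memslot_toggle_ksm is illustrative only, and the authoritative version is the one in the patch.

/*
 * Condensed sketch (illustrative name; mirrors kvmppc_memslot_page_merge()
 * from the patch): toggle KSM merging for every VMA backing a memslot.
 */
static int memslot_toggle_ksm(struct kvm *kvm,
                const struct kvm_memory_slot *memslot, bool merge)
{
        unsigned long start = gfn_to_hva(kvm, memslot->base_gfn);
        unsigned long end;
        struct vm_area_struct *vma;
        int ret = 0;

        if (kvm_is_error_hva(start))
                return H_STATE;
        end = start + (memslot->npages << PAGE_SHIFT);

        mmap_write_lock(kvm->mm);       /* ksm_madvise() needs the write lock */
        do {
                vma = find_vma_intersection(kvm->mm, start, end);
                if (!vma) {
                        ret = H_STATE;  /* hole in the mapping: fail the transition */
                        break;
                }
                ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                  merge ? MADV_MERGEABLE : MADV_UNMERGEABLE,
                                  &vma->vm_flags);
                if (ret) {
                        ret = H_STATE;
                        break;
                }
                start = vma->vm_end;    /* continue with the next VMA */
        } while (end > vma->vm_end);
        mmap_write_unlock(kvm->mm);

        return ret;
}

Doing this once per memslot at H_SVM_INIT_START is what lets kvmppc_svm_page_in() drop its ksm_madvise() call and run under mmap_read_lock(), which is why the write-lock/downgrade dance disappears in the later hunks.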

File tree

2 files changed: +89 −35 lines changed


Documentation/powerpc/ultravisor.rst

Lines changed: 1 addition & 0 deletions
@@ -895,6 +895,7 @@ Return values
     One of the following values:
 
         * H_SUCCESS on success.
+        * H_STATE if the VM is not in a position to switch to secure.
 
 Description
 ~~~~~~~~~~~

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 88 additions & 35 deletions
@@ -211,10 +211,79 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
         return false;
 }
 
+static int kvmppc_memslot_page_merge(struct kvm *kvm,
+                const struct kvm_memory_slot *memslot, bool merge)
+{
+        unsigned long gfn = memslot->base_gfn;
+        unsigned long end, start = gfn_to_hva(kvm, gfn);
+        int ret = 0;
+        struct vm_area_struct *vma;
+        int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
+
+        if (kvm_is_error_hva(start))
+                return H_STATE;
+
+        end = start + (memslot->npages << PAGE_SHIFT);
+
+        mmap_write_lock(kvm->mm);
+        do {
+                vma = find_vma_intersection(kvm->mm, start, end);
+                if (!vma) {
+                        ret = H_STATE;
+                        break;
+                }
+                ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+                          merge_flag, &vma->vm_flags);
+                if (ret) {
+                        ret = H_STATE;
+                        break;
+                }
+                start = vma->vm_end;
+        } while (end > vma->vm_end);
+
+        mmap_write_unlock(kvm->mm);
+        return ret;
+}
+
+static void kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+                const struct kvm_memory_slot *memslot)
+{
+        uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
+        kvmppc_uvmem_slot_free(kvm, memslot);
+        kvmppc_memslot_page_merge(kvm, memslot, true);
+}
+
+static int kvmppc_uvmem_memslot_create(struct kvm *kvm,
+                const struct kvm_memory_slot *memslot)
+{
+        int ret = H_PARAMETER;
+
+        if (kvmppc_memslot_page_merge(kvm, memslot, false))
+                return ret;
+
+        if (kvmppc_uvmem_slot_init(kvm, memslot))
+                goto out1;
+
+        ret = uv_register_mem_slot(kvm->arch.lpid,
+                                   memslot->base_gfn << PAGE_SHIFT,
+                                   memslot->npages * PAGE_SIZE,
+                                   0, memslot->id);
+        if (ret < 0) {
+                ret = H_PARAMETER;
+                goto out;
+        }
+        return 0;
+out:
+        kvmppc_uvmem_slot_free(kvm, memslot);
+out1:
+        kvmppc_memslot_page_merge(kvm, memslot, true);
+        return ret;
+}
+
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 {
         struct kvm_memslots *slots;
-        struct kvm_memory_slot *memslot;
+        struct kvm_memory_slot *memslot, *m;
         int ret = H_SUCCESS;
         int srcu_idx;
 
@@ -232,23 +301,24 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
                 return H_AUTHORITY;
 
         srcu_idx = srcu_read_lock(&kvm->srcu);
+
+        /* register the memslot */
         slots = kvm_memslots(kvm);
         kvm_for_each_memslot(memslot, slots) {
-                if (kvmppc_uvmem_slot_init(kvm, memslot)) {
-                        ret = H_PARAMETER;
-                        goto out;
-                }
-                ret = uv_register_mem_slot(kvm->arch.lpid,
-                                           memslot->base_gfn << PAGE_SHIFT,
-                                           memslot->npages * PAGE_SIZE,
-                                           0, memslot->id);
-                if (ret < 0) {
-                        kvmppc_uvmem_slot_free(kvm, memslot);
-                        ret = H_PARAMETER;
-                        goto out;
+                ret = kvmppc_uvmem_memslot_create(kvm, memslot);
+                if (ret)
+                        break;
+        }
+
+        if (ret) {
+                slots = kvm_memslots(kvm);
+                kvm_for_each_memslot(m, slots) {
+                        if (m == memslot)
+                                break;
+                        kvmppc_uvmem_memslot_delete(kvm, memslot);
                 }
         }
-out:
+
         srcu_read_unlock(&kvm->srcu, srcu_idx);
         return ret;
 }
@@ -384,7 +454,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  */
 static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
                    unsigned long end, unsigned long gpa, struct kvm *kvm,
-                   unsigned long page_shift, bool *downgrade)
+                   unsigned long page_shift)
 {
         unsigned long src_pfn, dst_pfn = 0;
         struct migrate_vma mig;
@@ -400,18 +470,6 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
         mig.src = &src_pfn;
         mig.dst = &dst_pfn;
 
-        /*
-         * We come here with mmap_lock write lock held just for
-         * ksm_madvise(), otherwise we only need read mmap_lock.
-         * Hence downgrade to read lock once ksm_madvise() is done.
-         */
-        ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-                          MADV_UNMERGEABLE, &vma->vm_flags);
-        mmap_write_downgrade(kvm->mm);
-        *downgrade = true;
-        if (ret)
-                return ret;
-
         ret = migrate_vma_setup(&mig);
         if (ret)
                 return ret;
@@ -503,7 +561,6 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
                 unsigned long flags,
                 unsigned long page_shift)
 {
-        bool downgrade = false;
         unsigned long start, end;
         struct vm_area_struct *vma;
         int srcu_idx;
@@ -524,7 +581,7 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 
         ret = H_PARAMETER;
         srcu_idx = srcu_read_lock(&kvm->srcu);
-        mmap_write_lock(kvm->mm);
+        mmap_read_lock(kvm->mm);
 
         start = gfn_to_hva(kvm, gfn);
         if (kvm_is_error_hva(start))
@@ -540,16 +597,12 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
         if (!vma || vma->vm_start > start || vma->vm_end < end)
                 goto out_unlock;
 
-        if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
-                                &downgrade))
+        if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift))
                 ret = H_SUCCESS;
 out_unlock:
         mutex_unlock(&kvm->arch.uvmem_lock);
 out:
-        if (downgrade)
-                mmap_read_unlock(kvm->mm);
-        else
-                mmap_write_unlock(kvm->mm);
+        mmap_read_unlock(kvm->mm);
         srcu_read_unlock(&kvm->srcu, srcu_idx);
         return ret;
 }
