Skip to content

Commit 66a644c

Browse files
committed
KVM: guest_memfd: abstract how prepared folios are recorded
Right now, large folios are not supported in guest_memfd, and therefore the order used by kvm_gmem_populate() is always 0. In this scenario, using the up-to-date bit to track prepared-ness is nice and easy because we have one bit available per page. In the future, however, we might have large pages that are partially populated; for example, in the case of SEV-SNP, if a large page has both shared and private areas inside, it is necessary to populate it at a granularity that is smaller than that of the guest_memfd's backing store. In that case we will have to track preparedness at a 4K level, probably as a bitmap. In preparation for that, do not use explicitly folio_test_uptodate() and folio_mark_uptodate(). Return the state of the page directly from __kvm_gmem_get_pfn(), so that it is expected to apply to 2^N pages with N=*max_order. The function to mark a range as prepared for now takes just a folio, but is expected to take also an index and order (or something like that) when large pages are introduced. Thanks to Michael Roth for pointing out the issue with large pages. Signed-off-by: Paolo Bonzini <[email protected]>
1 parent e4ee544 commit 66a644c

File tree

1 file changed

+20
-13
lines changed

1 file changed

+20
-13
lines changed

virt/kvm/guest_memfd.c

Lines changed: 20 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -42,6 +42,11 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
4242
return 0;
4343
}
4444

45+
/*
 * Mark @folio as prepared for guest use.
 *
 * Preparedness is currently tracked with the folio's up-to-date bit
 * (folio_mark_uptodate()).  Callers go through this helper rather than
 * setting the bit directly so that, per the commit message, the tracking
 * can later move to a finer-grained scheme (e.g. a per-4K bitmap for
 * partially-populated large folios) without touching every call site.
 */
static inline void kvm_gmem_mark_prepared(struct folio *folio)
46+
{
47+
folio_mark_uptodate(folio);
48+
}
49+
4550
/*
4651
* Process @folio, which contains @gfn, so that the guest can use it.
4752
* The folio must be locked and the gfn must be contained in @slot.
@@ -55,9 +60,6 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
5560
pgoff_t index;
5661
int r;
5762

58-
if (folio_test_uptodate(folio))
59-
return 0;
60-
6163
nr_pages = folio_nr_pages(folio);
6264
for (i = 0; i < nr_pages; i++)
6365
clear_highpage(folio_page(folio, i));
@@ -80,7 +82,7 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
8082
index = ALIGN_DOWN(index, 1 << folio_order(folio));
8183
r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
8284
if (!r)
83-
folio_mark_uptodate(folio);
85+
kvm_gmem_mark_prepared(folio);
8486

8587
return r;
8688
}
@@ -551,7 +553,8 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
551553
/* Returns a locked folio on success. */
552554
static struct folio *
553555
__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
554-
gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
556+
gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
557+
int *max_order)
555558
{
556559
pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
557560
struct kvm_gmem *gmem = file->private_data;
@@ -582,6 +585,7 @@ __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
582585
if (max_order)
583586
*max_order = 0;
584587

588+
*is_prepared = folio_test_uptodate(folio);
585589
return folio;
586590
}
587591

@@ -590,18 +594,21 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
590594
{
591595
struct file *file = kvm_gmem_get_file(slot);
592596
struct folio *folio;
597+
bool is_prepared = false;
593598
int r = 0;
594599

595600
if (!file)
596601
return -EFAULT;
597602

598-
folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order);
603+
folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
599604
if (IS_ERR(folio)) {
600605
r = PTR_ERR(folio);
601606
goto out;
602607
}
603608

604-
r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
609+
if (!is_prepared)
610+
r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
611+
605612
folio_unlock(folio);
606613
if (r < 0)
607614
folio_put(folio);
@@ -641,30 +648,30 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
641648
for (i = 0; i < npages; i += (1 << max_order)) {
642649
struct folio *folio;
643650
gfn_t gfn = start_gfn + i;
651+
bool is_prepared = false;
644652
kvm_pfn_t pfn;
645653

646654
if (signal_pending(current)) {
647655
ret = -EINTR;
648656
break;
649657
}
650658

651-
folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order);
659+
folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
652660
if (IS_ERR(folio)) {
653661
ret = PTR_ERR(folio);
654662
break;
655663
}
656664

657-
if (folio_test_uptodate(folio)) {
665+
if (is_prepared) {
658666
folio_unlock(folio);
659667
folio_put(folio);
660668
ret = -EEXIST;
661669
break;
662670
}
663671

664672
folio_unlock(folio);
665-
if (!IS_ALIGNED(gfn, (1 << max_order)) ||
666-
(npages - i) < (1 << max_order))
667-
max_order = 0;
673+
WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
674+
(npages - i) < (1 << max_order));
668675

669676
ret = -EINVAL;
670677
while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
@@ -678,7 +685,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
678685
p = src ? src + i * PAGE_SIZE : NULL;
679686
ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
680687
if (!ret)
681-
folio_mark_uptodate(folio);
688+
kvm_gmem_mark_prepared(folio);
682689

683690
put_folio_and_exit:
684691
folio_put(folio);

0 commit comments

Comments (0)