Skip to content

Commit d0d8722

Browse files
committed
KVM: guest_memfd: return folio from __kvm_gmem_get_pfn()
Right now this is simply more consistent and avoids use of pfn_to_page() and put_page(). It will be put to more use in upcoming patches, to ensure that the up-to-date flag is set at the very end of both the kvm_gmem_get_pfn() and kvm_gmem_populate() flows.

Reviewed-by: Michael Roth <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 5932ca4 commit d0d8722

File tree

1 file changed

+20
-17
lines changed

1 file changed

+20
-17
lines changed

virt/kvm/guest_memfd.c

Lines changed: 20 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -541,34 +541,34 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
541541
fput(file);
542542
}
543543

544-
static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
545-
gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
544+
static struct folio *
545+
__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
546+
gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
546547
{
547548
pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
548549
struct kvm_gmem *gmem = file->private_data;
549550
struct folio *folio;
550551
struct page *page;
551-
int r;
552552

553553
if (file != slot->gmem.file) {
554554
WARN_ON_ONCE(slot->gmem.file);
555-
return -EFAULT;
555+
return ERR_PTR(-EFAULT);
556556
}
557557

558558
gmem = file->private_data;
559559
if (xa_load(&gmem->bindings, index) != slot) {
560560
WARN_ON_ONCE(xa_load(&gmem->bindings, index));
561-
return -EIO;
561+
return ERR_PTR(-EIO);
562562
}
563563

564564
folio = kvm_gmem_get_folio(file_inode(file), index, prepare);
565565
if (IS_ERR(folio))
566-
return PTR_ERR(folio);
566+
return folio;
567567

568568
if (folio_test_hwpoison(folio)) {
569569
folio_unlock(folio);
570570
folio_put(folio);
571-
return -EHWPOISON;
571+
return ERR_PTR(-EHWPOISON);
572572
}
573573

574574
page = folio_file_page(folio, index);
@@ -577,25 +577,25 @@ static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
577577
if (max_order)
578578
*max_order = 0;
579579

580-
r = 0;
581-
582580
folio_unlock(folio);
583-
584-
return r;
581+
return folio;
585582
}
586583

587584
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
588585
gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
589586
{
590587
struct file *file = kvm_gmem_get_file(slot);
591-
int r;
588+
struct folio *folio;
592589

593590
if (!file)
594591
return -EFAULT;
595592

596-
r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
593+
folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
597594
fput(file);
598-
return r;
595+
if (IS_ERR(folio))
596+
return PTR_ERR(folio);
597+
598+
return 0;
599599
}
600600
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
601601

@@ -625,6 +625,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
625625

626626
npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
627627
for (i = 0; i < npages; i += (1 << max_order)) {
628+
struct folio *folio;
628629
gfn_t gfn = start_gfn + i;
629630
kvm_pfn_t pfn;
630631

@@ -633,9 +634,11 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
633634
break;
634635
}
635636

636-
ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false);
637-
if (ret)
637+
folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false);
638+
if (IS_ERR(folio)) {
639+
ret = PTR_ERR(folio);
638640
break;
641+
}
639642

640643
if (!IS_ALIGNED(gfn, (1 << max_order)) ||
641644
(npages - i) < (1 << max_order))
@@ -644,7 +647,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
644647
p = src ? src + i * PAGE_SIZE : NULL;
645648
ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
646649

647-
put_page(pfn_to_page(pfn));
650+
folio_put(folio);
648651
if (ret)
649652
break;
650653
}

0 commit comments

Comments (0)