
Commit f9e54c3

awilliam authored and akpm00 committed
vfio/pci: implement huge_fault support
With the addition of pfnmap support in vmf_insert_pfn_{pmd,pud}() we can take advantage of PMD and PUD faults on PCI BAR mmaps and create more efficient mappings. PCI BARs are always a power of two and will typically get at least PMD alignment without userspace even trying. Userspace alignment for PUD mappings is also not too difficult.

Consolidate faults through a single handler with a new wrapper for standard single page faults. The pre-faulting behavior of commit d71a989 ("vfio/pci: Insert full vma on mmap'd MMIO fault") is removed in this refactoring since huge_fault will cover the bulk of the faults and results in more efficient page table usage. We also want to avoid having pre-faulted single page mappings preempt huge page mappings.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Alex Williamson <[email protected]>
Signed-off-by: Peter Xu <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Gavin Shan <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Niklas Schnelle <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 3e509c9 commit f9e54c3
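
The commit message's point about userspace alignment is easy to illustrate. The sketch below is not part of this commit; it shows one way a VFIO user could obtain a PUD-aligned mapping of a BAR region so the new huge_fault handler can install 1 GiB mappings: reserve an over-sized anonymous region, then place the BAR mmap at an aligned address inside it with MAP_FIXED. The helper name map_bar_aligned and the device_fd/region_offset/region_size parameters are illustrative; the offset and size are assumed to have been discovered already through the usual VFIO region info query.

/*
 * Illustrative userspace sketch (not part of this commit): obtain a
 * 1 GiB (PUD) aligned mapping of a BAR region.  device_fd, region_offset
 * and region_size are assumed to have been obtained via the VFIO region
 * info interface.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

#define PUD_ALIGN	(1UL << 30)	/* 1 GiB with 4 KiB pages on x86-64 */

static void *map_bar_aligned(int device_fd, off_t region_offset,
			     size_t region_size)
{
	/* Reserve an over-sized, unpopulated span to find aligned space. */
	size_t span = region_size + PUD_ALIGN;
	void *res = mmap(NULL, span, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uintptr_t aligned;
	void *bar;

	if (res == MAP_FAILED)
		return MAP_FAILED;

	aligned = ((uintptr_t)res + PUD_ALIGN - 1) & ~(PUD_ALIGN - 1);

	/* Replace the reservation at the aligned address with the BAR. */
	bar = mmap((void *)aligned, region_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_FIXED, device_fd, region_offset);
	if (bar == MAP_FAILED) {
		munmap(res, span);
		return MAP_FAILED;
	}

	/* Drop the unused reservation on either side of the BAR mapping. */
	if (aligned > (uintptr_t)res)
		munmap(res, aligned - (uintptr_t)res);
	if ((uintptr_t)res + span > aligned + region_size)
		munmap((void *)(aligned + region_size),
		       (uintptr_t)res + span - (aligned + region_size));

	return bar;
}

A BAR smaller than 1 GiB would only need 2 MiB (PMD) alignment, which the same trick provides with a smaller alignment constant; as the commit message notes, typical mmap placement often gives PMD alignment without any effort.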

File tree: 1 file changed, +43 -17 lines changed


drivers/vfio/pci/vfio_pci_core.c

Lines changed: 43 additions & 17 deletions
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
+#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -1657,45 +1658,70 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
 	return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
 }
 
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+					   unsigned int order)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
 	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
-	unsigned long addr = vma->vm_start;
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 
+	if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+		      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+		ret = VM_FAULT_FALLBACK;
+		goto out;
+	}
+
 	pfn = vma_to_pfn(vma);
 
 	down_read(&vdev->memory_lock);
 
 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
 		goto out_unlock;
 
-	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
-	if (ret & VM_FAULT_ERROR)
-		goto out_unlock;
-
-	/*
-	 * Pre-fault the remainder of the vma, abort further insertions and
-	 * supress error if fault is encountered during pre-fault.
-	 */
-	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
-		if (addr == vmf->address)
-			continue;
-
-		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
-			break;
+	switch (order) {
+	case 0:
+		ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+		break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+	case PMD_ORDER:
+		ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+	case PUD_ORDER:
+		ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+	default:
+		ret = VM_FAULT_FALLBACK;
 	}
 
 out_unlock:
 	up_read(&vdev->memory_lock);
+out:
+	dev_dbg_ratelimited(&vdev->pdev->dev,
+			    "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+			    __func__, order,
+			    vma->vm_pgoff >>
+				(VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+			    pgoff, (unsigned int)ret);
 
 	return ret;
 }
 
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+	return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
 static const struct vm_operations_struct vfio_pci_mmap_ops = {
-	.fault = vfio_pci_mmap_fault,
+	.fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+	.huge_fault = vfio_pci_mmap_huge_fault,
+#endif
 };
 
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
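
For reference, the fallback condition at the top of the new handler can be read in isolation: a huge fault is only attempted when the faulting address is aligned to the order's size and the whole huge page fits inside the vma. Below is a minimal standalone sketch of that test, not kernel code, assuming 4 KiB pages, where order 9 corresponds to a 2 MiB PMD and order 18 to a 1 GiB PUD on x86-64; the helper name huge_fault_possible is illustrative.

/*
 * Standalone sketch mirroring the alignment/bounds test in
 * vfio_pci_mmap_huge_fault(); not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL

static bool huge_fault_possible(unsigned long addr, unsigned long vm_end,
				unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (!order)		/* single-page faults never hit this test */
		return true;

	/* Must be size-aligned and must not run past the end of the vma. */
	return !(addr & (size - 1)) && addr + size <= vm_end;
}

int main(void)
{
	/* 2 MiB-aligned fault inside the vma: a PMD mapping is attempted. */
	printf("%d\n", huge_fault_possible(0x200000, 0x600000, 9));
	/* Unaligned fault: the handler returns VM_FAULT_FALLBACK instead. */
	printf("%d\n", huge_fault_possible(0x201000, 0x600000, 9));
	return 0;
}

When the test fails and VM_FAULT_FALLBACK is returned, the core mm retries at the next smaller size, eventually landing in vfio_pci_mmap_page_fault() for a single page.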
