@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
+#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -1657,45 +1658,70 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
 	return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
 }
 
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+					   unsigned int order)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
 	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
-	unsigned long addr = vma->vm_start;
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 
+	if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+		      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+		ret = VM_FAULT_FALLBACK;
+		goto out;
+	}
+
 	pfn = vma_to_pfn(vma);
 
 	down_read(&vdev->memory_lock);
 
 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
 		goto out_unlock;
 
-	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
-	if (ret & VM_FAULT_ERROR)
-		goto out_unlock;
-
-	/*
-	 * Pre-fault the remainder of the vma, abort further insertions and
-	 * supress error if fault is encountered during pre-fault.
-	 */
-	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
-		if (addr == vmf->address)
-			continue;
-
-		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
-			break;
+	switch (order) {
+	case 0:
+		ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+		break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+	case PMD_ORDER:
+		ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+	case PUD_ORDER:
+		ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+	default:
+		ret = VM_FAULT_FALLBACK;
 	}
 
 out_unlock:
 	up_read(&vdev->memory_lock);
+out:
+	dev_dbg_ratelimited(&vdev->pdev->dev,
+			    "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+			    __func__, order,
+			    vma->vm_pgoff >>
+				(VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+			    pgoff, (unsigned int)ret);
 
 	return ret;
 }
 
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+	return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
 static const struct vm_operations_struct vfio_pci_mmap_ops = {
-	.fault = vfio_pci_mmap_fault,
+	.fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+	.huge_fault = vfio_pci_mmap_huge_fault,
+#endif
 };
 
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
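Reviewer's note: the fallback test added at the top of vfio_pci_mmap_huge_fault() is the crux of the huge-fault path, so here is a standalone restatement of it as a minimal userspace C sketch. The helper name huge_fault_fits() is hypothetical and not part of the patch, and PAGE_SIZE is assumed to be 4 KiB for the example; the logic mirrors the patched condition: a fault of non-zero order is served only if the faulting address is aligned to PAGE_SIZE << order and the resulting mapping ends within the VMA, otherwise the MM core is asked to retry at a smaller order via VM_FAULT_FALLBACK.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Mirror of the check in vfio_pci_mmap_huge_fault(): a non-zero
 * order is only satisfiable if the faulting address is aligned to
 * PAGE_SIZE << order and the whole huge mapping stays within the
 * VMA (i.e. does not run past vm_end). Hypothetical helper for
 * illustration only.
 */
static bool huge_fault_fits(unsigned long address, unsigned int order,
			    unsigned long vm_end)
{
	unsigned long size = PAGE_SIZE << order;

	if (!order)
		return true;	/* order-0 faults always fit */
	if (address & (size - 1))
		return false;	/* misaligned for this order */
	if (address + size > vm_end)
		return false;	/* mapping would run past the VMA */
	return true;
}

int main(void)
{
	unsigned long vm_end = 0x400000000UL;

	/* 2 MiB-aligned address, order 9: fits -> prints 1 */
	printf("%d\n", huge_fault_fits(0x200000UL, 9, vm_end));
	/* 4 KiB off a 2 MiB boundary, order 9: fallback -> prints 0 */
	printf("%d\n", huge_fault_fits(0x201000UL, 9, vm_end));
	return 0;
}

With 4 KiB pages on x86-64, PMD_ORDER is 9 (2 MiB mappings) and PUD_ORDER is 18 (1 GiB mappings), which is why the switch in the hunk above only needs the order-0 case plus those two huge cases, with everything else falling back.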