mm: remove follow_pfn

jira LE-3557
Rebuild_History Non-Buildable kernel-5.14.0-570.26.1.el9_6
commit-author Christoph Hellwig <[email protected]>
commit cb10c28ac82c9b7a5e9b3b1dc7157036c20c36dd
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-5.14.0-570.26.1.el9_6/cb10c28a.failed

Remove follow_pfn now that the last user is gone.
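
For reference, a caller that still needs a raw PFN can open-code the
lookup through follow_pte(), which is exactly what the removed
mm/memory.c implementation did internally. A minimal sketch, assuming
the follow_pte() signature declared in include/linux/mm.h below; the
helper name lookup_pfn is hypothetical and not part of this commit:

	/*
	 * Hypothetical stand-in for the removed follow_pfn(), mirroring
	 * the body deleted from mm/memory.c in this patch. The mmap
	 * semaphore must be held for read, as follow_pte() requires.
	 */
	static int lookup_pfn(struct vm_area_struct *vma, unsigned long address,
			      unsigned long *pfn)
	{
		spinlock_t *ptl;
		pte_t *ptep;
		int ret;

		/* Only IO mappings and raw PFN mappings are allowed. */
		if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
			return -EINVAL;

		ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
		if (ret)
			return ret;

		/* Read the PFN under the PTE lock, then drop the lock. */
		*pfn = pte_pfn(ptep_get(ptep));
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}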

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Nathan Chancellor <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit cb10c28ac82c9b7a5e9b3b1dc7157036c20c36dd)
Signed-off-by: Jonathan Maple <[email protected]>

# Conflicts:
#	mm/nommu.c
diff --cc mm/nommu.c
index f3f6a7e97647,331d2f778695..000000000000
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@@ -110,29 -110,6 +110,32 @@@ unsigned int kobjsize(const void *objp
  	return page_size(page);
  }
  
++<<<<<<< HEAD
 +/**
 + * follow_pfn - look up PFN at a user virtual address
 + * @vma: memory mapping
 + * @address: user virtual address
 + * @pfn: location to store found PFN
 + *
 + * Only IO mappings and raw PFN mappings are allowed.
 + *
 + * Returns zero and the pfn at @pfn on success, -ve otherwise.
 + */
 +int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 +	unsigned long *pfn)
 +{
 +	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 +		return -EINVAL;
 +
 +	*pfn = address >> PAGE_SHIFT;
 +	return 0;
 +}
 +EXPORT_SYMBOL(follow_pfn);
 +
 +LIST_HEAD(vmap_area_list);
 +
++=======
++>>>>>>> cb10c28ac82c (mm: remove follow_pfn)
  void vfree(const void *addr)
  {
  	kfree(addr);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 196c481ec160..cd16e4cb2ce0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2429,8 +2429,6 @@ int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	       unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index e2794e3b8919..4498a39fb51d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5623,8 +5623,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
  * should be taken for read.
  *
- * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
- * it is not a good general-purpose API.
+ * KVM uses this function. While it is arguably less bad than the historic
+ * ``follow_pfn``, it is not a good general-purpose API.
  *
  * Return: zero on success, -ve otherwise.
  */
@@ -5666,38 +5666,6 @@ int follow_pte(struct mm_struct *mm, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(follow_pte);
 
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * This function does not allow the caller to read the permissions
- * of the PTE. Do not use it.
- *
- * Return: zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn)
-{
-	int ret = -EINVAL;
-	spinlock_t *ptl;
-	pte_t *ptep;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return ret;
-
-	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
-	if (ret)
-		return ret;
-	*pfn = pte_pfn(ptep_get(ptep));
-	pte_unmap_unlock(ptep, ptl);
-	return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 int follow_phys(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags,
* Unmerged path mm/nommu.c