|
| 1 | +mm: move follow_phys to arch/x86/mm/pat/memtype.c |
| 2 | + |
| 3 | +jira LE-3557 |
| 4 | +Rebuild_History Non-Buildable kernel-5.14.0-570.26.1.el9_6 |
| 5 | +commit-author Christoph Hellwig <[email protected]>
| 6 | +commit 5b34b76cb0cd8a21dee5c7677eae98480b0d05cc |
| 7 | +Empty-Commit: Cherry-Pick Conflicts during history rebuild. |
| 8 | +Will be included in final tarball splat. Ref for failed cherry-pick at: |
| 9 | +ciq/ciq_backports/kernel-5.14.0-570.26.1.el9_6/5b34b76c.failed |
| 10 | + |
| 11 | +follow_phys is only used by two callers in arch/x86/mm/pat/memtype.c. |
| 12 | +Move it there and hardcode the two arguments that get the same values |
| 13 | +passed by both callers. |
| 14 | + |
| 15 | +[[email protected]: conflict resolutions]
| 16 | +Link: https://lkml.kernel.org/r/[email protected]
| 17 | +Link: https://lkml.kernel.org/r/[email protected]
| 18 | + Signed-off-by: Christoph Hellwig <[email protected]>
| 19 | + Signed-off-by: David Hildenbrand <[email protected]>
| 20 | + Reviewed-by: David Hildenbrand <[email protected]>
| 21 | + Cc: Andy Lutomirski <[email protected]>
| 22 | + Cc: Dave Hansen <[email protected]>
| 23 | +
| 24 | + Cc: Ingo Molnar <[email protected]>
| 25 | + Cc: Peter Zijlstra <[email protected]>
| 26 | + Cc: Nathan Chancellor <[email protected]>
| 27 | + Signed-off-by: Andrew Morton <[email protected]>
| 28 | +(cherry picked from commit 5b34b76cb0cd8a21dee5c7677eae98480b0d05cc)
| 29 | + Signed-off-by: Jonathan Maple <[email protected]>
| 30 | + |
| 31 | +# Conflicts: |
| 32 | +# include/linux/mm.h |
| 33 | +diff --cc include/linux/mm.h |
| 34 | +index 196c481ec160,5dc65618e386..000000000000 |
| 35 | +--- a/include/linux/mm.h |
| 36 | ++++ b/include/linux/mm.h |
| 37 | +@@@ -2429,10 -2424,6 +2429,13 @@@ in |
| 38 | + copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); |
| 39 | + int follow_pte(struct mm_struct *mm, unsigned long address, |
| 40 | + pte_t **ptepp, spinlock_t **ptlp); |
| 41 | +++<<<<<<< HEAD |
| 42 | + +int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
| 43 | + + unsigned long *pfn); |
| 44 | + +int follow_phys(struct vm_area_struct *vma, unsigned long address, |
| 45 | + + unsigned int flags, unsigned long *prot, resource_size_t *phys); |
| 46 | +++======= |
| 47 | +++>>>>>>> 5b34b76cb0cd (mm: move follow_phys to arch/x86/mm/pat/memtype.c) |
| 48 | + int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, |
| 49 | + void *buf, int len, int write); |
| 50 | + |
| 51 | +diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c |
| 52 | +index 36b603d0cdde..d01c3b0bd6eb 100644 |
| 53 | +--- a/arch/x86/mm/pat/memtype.c |
| 54 | ++++ b/arch/x86/mm/pat/memtype.c |
| 55 | +@@ -39,6 +39,7 @@ |
| 56 | + #include <linux/pfn_t.h> |
| 57 | + #include <linux/slab.h> |
| 58 | + #include <linux/mm.h> |
| 59 | ++#include <linux/highmem.h> |
| 60 | + #include <linux/fs.h> |
| 61 | + #include <linux/rbtree.h> |
| 62 | + |
| 63 | +@@ -947,6 +948,32 @@ static void free_pfn_range(u64 paddr, unsigned long size) |
| 64 | + memtype_free(paddr, paddr + size); |
| 65 | + } |
| 66 | + |
| 67 | ++static int follow_phys(struct vm_area_struct *vma, unsigned long *prot, |
| 68 | ++ resource_size_t *phys) |
| 69 | ++{ |
| 70 | ++ pte_t *ptep, pte; |
| 71 | ++ spinlock_t *ptl; |
| 72 | ++ |
| 73 | ++ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) |
| 74 | ++ return -EINVAL; |
| 75 | ++ |
| 76 | ++ if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl)) |
| 77 | ++ return -EINVAL; |
| 78 | ++ |
| 79 | ++ pte = ptep_get(ptep); |
| 80 | ++ |
| 81 | ++ /* Never return PFNs of anon folios in COW mappings. */ |
| 82 | ++ if (vm_normal_folio(vma, vma->vm_start, pte)) { |
| 83 | ++ pte_unmap_unlock(ptep, ptl); |
| 84 | ++ return -EINVAL; |
| 85 | ++ } |
| 86 | ++ |
| 87 | ++ *prot = pgprot_val(pte_pgprot(pte)); |
| 88 | ++ *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; |
| 89 | ++ pte_unmap_unlock(ptep, ptl); |
| 90 | ++ return 0; |
| 91 | ++} |
| 92 | ++ |
| 93 | + static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, |
| 94 | + pgprot_t *pgprot) |
| 95 | + { |
| 96 | +@@ -964,7 +991,7 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, |
| 97 | + * detect the PFN. If we need the cachemode as well, we're out of luck |
| 98 | + * for now and have to fail fork(). |
| 99 | + */ |
| 100 | +- if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) { |
| 101 | ++ if (!follow_phys(vma, &prot, paddr)) { |
| 102 | + if (pgprot) |
| 103 | + *pgprot = __pgprot(prot); |
| 104 | + return 0; |
| 105 | +* Unmerged path include/linux/mm.h |
| 106 | +diff --git a/mm/memory.c b/mm/memory.c |
| 107 | +index e2794e3b8919..257618e95c0e 100644 |
| 108 | +--- a/mm/memory.c |
| 109 | ++++ b/mm/memory.c |
| 110 | +@@ -5699,38 +5699,6 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
| 111 | + EXPORT_SYMBOL(follow_pfn); |
| 112 | + |
| 113 | + #ifdef CONFIG_HAVE_IOREMAP_PROT |
| 114 | +-int follow_phys(struct vm_area_struct *vma, |
| 115 | +- unsigned long address, unsigned int flags, |
| 116 | +- unsigned long *prot, resource_size_t *phys) |
| 117 | +-{ |
| 118 | +- int ret = -EINVAL; |
| 119 | +- pte_t *ptep, pte; |
| 120 | +- spinlock_t *ptl; |
| 121 | +- |
| 122 | +- if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) |
| 123 | +- goto out; |
| 124 | +- |
| 125 | +- if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) |
| 126 | +- goto out; |
| 127 | +- pte = ptep_get(ptep); |
| 128 | +- |
| 129 | +- /* Never return PFNs of anon folios in COW mappings. */ |
| 130 | +- if (vm_normal_folio(vma, address, pte)) |
| 131 | +- goto unlock; |
| 132 | +- |
| 133 | +- if ((flags & FOLL_WRITE) && !pte_write(pte)) |
| 134 | +- goto unlock; |
| 135 | +- |
| 136 | +- *prot = pgprot_val(pte_pgprot(pte)); |
| 137 | +- *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; |
| 138 | +- |
| 139 | +- ret = 0; |
| 140 | +-unlock: |
| 141 | +- pte_unmap_unlock(ptep, ptl); |
| 142 | +-out: |
| 143 | +- return ret; |
| 144 | +-} |
| 145 | +- |
| 146 | + /** |
| 147 | + * generic_access_phys - generic implementation for iomem mmap access |
| 148 | + * @vma: the vma to access |
0 commit comments