Skip to content

Commit 8e59a6a

Browse files
committed
Merge tag 'mm-hotfixes-stable-2022-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton: "Mainly MM fixes. About half for issues which were introduced after 5.18 and the remainder for longer-term issues" * tag 'mm-hotfixes-stable-2022-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: mm: split huge PUD on wp_huge_pud fallback nilfs2: fix incorrect masking of permission flags for symlinks mm/rmap: fix dereferencing invalid subpage pointer in try_to_migrate_one() riscv/mm: fix build error while PAGE_TABLE_CHECK enabled without MMU Documentation: highmem: use literal block for code example in highmem.h comment mm: sparsemem: fix missing higher order allocation splitting mm/damon: use set_huge_pte_at() to make huge pte old sh: convert nommu io{re,un}map() to static inline functions mm: userfaultfd: fix UFFDIO_CONTINUE on fallocated shmem pages
2 parents b537439 + 14c99d6 commit 8e59a6a

File tree

9 files changed

+63
-38
lines changed

9 files changed

+63
-38
lines changed

arch/riscv/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ config RISCV
3838
select ARCH_SUPPORTS_ATOMIC_RMW
3939
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
4040
select ARCH_SUPPORTS_HUGETLBFS if MMU
41-
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
41+
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
4242
select ARCH_USE_MEMTEST
4343
select ARCH_USE_QUEUED_RWLOCKS
4444
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU

arch/sh/include/asm/io.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
271271
#endif /* CONFIG_HAVE_IOREMAP_PROT */
272272

273273
#else /* CONFIG_MMU */
274-
#define iounmap(addr) do { } while (0)
275-
#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset))
274+
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
275+
{
276+
return (void __iomem *)(unsigned long)offset;
277+
}
278+
279+
static inline void iounmap(volatile void __iomem *addr) { }
276280
#endif /* CONFIG_MMU */
277281

278282
#define ioremap_uc ioremap

fs/nilfs2/nilfs.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,9 @@ static inline int nilfs_acl_chmod(struct inode *inode)
198198

199199
static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
200200
{
201+
if (S_ISLNK(inode->i_mode))
202+
return 0;
203+
201204
inode->i_mode &= ~current_umask();
202205
return 0;
203206
}

include/linux/highmem.h

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -149,19 +149,19 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
149149
* It is used in atomic context when code wants to access the contents of a
150150
* page that might be allocated from high memory (see __GFP_HIGHMEM), for
151151
* example a page in the pagecache. The API has two functions, and they
152-
* can be used in a manner similar to the following:
152+
* can be used in a manner similar to the following::
153153
*
154-
* -- Find the page of interest. --
155-
* struct page *page = find_get_page(mapping, offset);
154+
* // Find the page of interest.
155+
* struct page *page = find_get_page(mapping, offset);
156156
*
157-
* -- Gain access to the contents of that page. --
158-
* void *vaddr = kmap_atomic(page);
157+
* // Gain access to the contents of that page.
158+
* void *vaddr = kmap_atomic(page);
159159
*
160-
* -- Do something to the contents of that page. --
161-
* memset(vaddr, 0, PAGE_SIZE);
160+
* // Do something to the contents of that page.
161+
* memset(vaddr, 0, PAGE_SIZE);
162162
*
163-
* -- Unmap that page. --
164-
* kunmap_atomic(vaddr);
163+
* // Unmap that page.
164+
* kunmap_atomic(vaddr);
165165
*
166166
* Note that the kunmap_atomic() call takes the result of the kmap_atomic()
167167
* call, not the argument.

mm/damon/vaddr.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -336,8 +336,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
336336
if (pte_young(entry)) {
337337
referenced = true;
338338
entry = pte_mkold(entry);
339-
huge_ptep_set_access_flags(vma, addr, pte, entry,
340-
vma->vm_flags & VM_WRITE);
339+
set_huge_pte_at(mm, addr, pte, entry);
341340
}
342341

343342
#ifdef CONFIG_MMU_NOTIFIER

mm/memory.c

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -4798,6 +4798,19 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
47984798

47994799
static vm_fault_t create_huge_pud(struct vm_fault *vmf)
48004800
{
4801+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4802+
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4803+
/* No support for anonymous transparent PUD pages yet */
4804+
if (vma_is_anonymous(vmf->vma))
4805+
return VM_FAULT_FALLBACK;
4806+
if (vmf->vma->vm_ops->huge_fault)
4807+
return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4808+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4809+
return VM_FAULT_FALLBACK;
4810+
}
4811+
4812+
static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4813+
{
48014814
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
48024815
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
48034816
/* No support for anonymous transparent PUD pages yet */
@@ -4812,19 +4825,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
48124825
split:
48134826
/* COW or write-notify not handled on PUD level: split pud.*/
48144827
__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4815-
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4816-
return VM_FAULT_FALLBACK;
4817-
}
4818-
4819-
static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4820-
{
4821-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4822-
/* No support for anonymous transparent PUD pages yet */
4823-
if (vma_is_anonymous(vmf->vma))
4824-
return VM_FAULT_FALLBACK;
4825-
if (vmf->vma->vm_ops->huge_fault)
4826-
return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4827-
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4828+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
48284829
return VM_FAULT_FALLBACK;
48294830
}
48304831

mm/rmap.c

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1899,8 +1899,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
18991899
/* Unexpected PMD-mapped THP? */
19001900
VM_BUG_ON_FOLIO(!pvmw.pte, folio);
19011901

1902-
subpage = folio_page(folio,
1903-
pte_pfn(*pvmw.pte) - folio_pfn(folio));
1902+
if (folio_is_zone_device(folio)) {
1903+
/*
1904+
* Our PTE is a non-present device exclusive entry and
1905+
* calculating the subpage as for the common case would
1906+
* result in an invalid pointer.
1907+
*
1908+
* Since only PAGE_SIZE pages can currently be
1909+
* migrated, just set it to page. This will need to be
1910+
* changed when hugepage migrations to device private
1911+
* memory are supported.
1912+
*/
1913+
VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1914+
subpage = &folio->page;
1915+
} else {
1916+
subpage = folio_page(folio,
1917+
pte_pfn(*pvmw.pte) - folio_pfn(folio));
1918+
}
19041919
address = pvmw.address;
19051920
anon_exclusive = folio_test_anon(folio) &&
19061921
PageAnonExclusive(subpage);
@@ -1993,15 +2008,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
19932008
/*
19942009
* No need to invalidate here; it will synchronize
19952010
* against the special swap migration pte.
1996-
*
1997-
* The assignment to subpage above was computed from a
1998-
* swap PTE which results in an invalid pointer.
1999-
* Since only PAGE_SIZE pages can currently be
2000-
* migrated, just set it to page. This will need to be
2001-
* changed when hugepage migrations to device private
2002-
* memory are supported.
20032011
*/
2004-
subpage = &folio->page;
20052012
} else if (PageHWPoison(subpage)) {
20062013
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
20072014
if (folio_test_hugetlb(folio)) {

mm/sparse-vmemmap.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,14 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
7878

7979
spin_lock(&init_mm.page_table_lock);
8080
if (likely(pmd_leaf(*pmd))) {
81+
/*
82+
* Higher order allocations from buddy allocator must be able to
83+
* be treated as independent small pages (as they can be freed
84+
* individually).
85+
*/
86+
if (!PageReserved(page))
87+
split_page(page, get_order(PMD_SIZE));
88+
8189
/* Make pte visible before pmd. See comment in pmd_install(). */
8290
smp_wmb();
8391
pmd_populate_kernel(&init_mm, pmd, pgtable);

mm/userfaultfd.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,10 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
246246
struct page *page;
247247
int ret;
248248

249-
ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
249+
ret = shmem_getpage(inode, pgoff, &page, SGP_NOALLOC);
250+
/* Our caller expects us to return -EFAULT if we failed to find page. */
251+
if (ret == -ENOENT)
252+
ret = -EFAULT;
250253
if (ret)
251254
goto out;
252255
if (!page) {

0 commit comments

Comments
 (0)