
Commit 9d9a2f2

Merge tag 'mm-hotfixes-stable-2024-07-10-13-19' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "21 hotfixes, 15 of which are cc:stable. No identifiable theme here -
  all are singleton patches, 19 are for MM"

* tag 'mm-hotfixes-stable-2024-07-10-13-19' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  mm/hugetlb: fix kernel NULL pointer dereference when migrating hugetlb folio
  mm/hugetlb: fix potential race in __update_and_free_hugetlb_folio()
  filemap: replace pte_offset_map() with pte_offset_map_nolock()
  arch/xtensa: always_inline get_current() and current_thread_info()
  sched.h: always_inline alloc_tag_{save|restore} to fix modpost warnings
  MAINTAINERS: mailmap: update Lorenzo Stoakes's email address
  mm: fix crashes from deferred split racing folio migration
  lib/build_OID_registry: avoid non-destructive substitution for Perl < 5.13.2 compat
  mm: gup: stop abusing try_grab_folio
  nilfs2: fix kernel bug on rename operation of broken directory
  mm/hugetlb_vmemmap: fix race with speculative PFN walkers
  cachestat: do not flush stats in recency check
  mm/shmem: disable PMD-sized page cache if needed
  mm/filemap: skip to create PMD-sized page cache if needed
  mm/readahead: limit page cache size in page_cache_ra_order()
  mm/filemap: make MAX_PAGECACHE_ORDER acceptable to xarray
  mm/damon/core: merge regions aggressively when max_nr_regions is unmet
  Fix userfaultfd_api to return EINVAL as expected
  mm: vmalloc: check if a hash-index is in cpu_possible_mask
  mm: prevent derefencing NULL ptr in pfn_section_valid()
  ...
2 parents: ef2b7eb + f708f69

25 files changed: +339 −286 lines

.mailmap

Lines changed: 1 addition & 0 deletions

@@ -384,6 +384,7 @@ Li Yang <[email protected]> <[email protected]>
+Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>

MAINTAINERS

Lines changed: 1 addition & 1 deletion

@@ -14472,7 +14472,7 @@ MEMORY MAPPING
 M:	Andrew Morton <[email protected]>
 R:	Liam R. Howlett <[email protected]>
 R:	Vlastimil Babka <[email protected]>
-R:	Lorenzo Stoakes <lstoakes@gmail.com>
+R:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 S:	Maintained
 W:	http://www.linux-mm.org

arch/xtensa/include/asm/current.h

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@

 struct task_struct;

-static inline struct task_struct *get_current(void)
+static __always_inline struct task_struct *get_current(void)
 {
 	return current_thread_info()->task;
 }

arch/xtensa/include/asm/thread_info.h

Lines changed: 1 addition & 1 deletion

@@ -91,7 +91,7 @@ struct thread_info {
 }

 /* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
+static __always_inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
 	__asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"

fs/nilfs2/dir.c

Lines changed: 30 additions & 2 deletions

@@ -383,11 +383,39 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,

 struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
 {
-	struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop);
+	struct folio *folio;
+	struct nilfs_dir_entry *de, *next_de;
+	size_t limit;
+	char *msg;

+	de = nilfs_get_folio(dir, 0, &folio);
 	if (IS_ERR(de))
 		return NULL;
-	return nilfs_next_entry(de);
+
+	limit = nilfs_last_byte(dir, 0);	/* is a multiple of chunk size */
+	if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
+		     !nilfs_match(1, ".", de))) {
+		msg = "missing '.'";
+		goto fail;
+	}
+
+	next_de = nilfs_next_entry(de);
+	/*
+	 * If "next_de" has not reached the end of the chunk, there is
+	 * at least one more record.  Check whether it matches "..".
+	 */
+	if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
+		     !nilfs_match(2, "..", next_de))) {
+		msg = "missing '..'";
+		goto fail;
+	}
+	*foliop = folio;
+	return next_de;
+
+fail:
+	nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
+	folio_release_kmap(folio, de);
+	return NULL;
 }

 ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
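
The rewritten nilfs_dotdot() no longer trusts that the first directory block of a (possibly corrupted) directory is well formed: it checks that the first record is "." pointing back at the directory itself, and that a ".." record follows within the same chunk, before handing ".." to the rename path. A hedged userspace sketch of the same two-record validation over an ext2-style dirent layout; the struct and names are illustrative, not nilfs2's on-disk format:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct dirent_disk {			/* illustrative on-disk record */
	uint64_t inode;
	uint16_t rec_len;		/* offset to the next record */
	uint8_t  name_len;
	char     name[];
};

/* Return true iff the chunk starts with "." followed by "..".
 * A real filesystem would also bound-check rec_len itself. */
static bool chunk_has_dot_dotdot(const char *chunk, size_t chunk_size)
{
	const struct dirent_disk *de = (const struct dirent_disk *)chunk;
	const struct dirent_disk *next;

	if (de->name_len != 1 || memcmp(de->name, ".", 1) != 0)
		return false;		/* missing '.' */

	next = (const struct dirent_disk *)(chunk + de->rec_len);
	if ((const char *)next >= chunk + chunk_size)
		return false;		/* '.' was the last record */

	return next->name_len == 2 && memcmp(next->name, "..", 2) == 0;
}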

fs/userfaultfd.c

Lines changed: 6 additions & 1 deletion

@@ -2057,7 +2057,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
 		goto out;
 	features = uffdio_api.features;
 	ret = -EINVAL;
-	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+	if (uffdio_api.api != UFFD_API)
 		goto err_out;
 	ret = -EPERM;
 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
@@ -2081,6 +2081,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
 	uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
 	uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
 #endif
+
+	ret = -EINVAL;
+	if (features & ~uffdio_api.features)
+		goto err_out;
+
 	uffdio_api.ioctls = UFFD_API_IOCTLS;
 	ret = -EFAULT;
 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
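
Effect on the UFFDIO_API handshake: the feature check now runs after the kernel has masked off features it cannot honour in this build, so requesting such a feature fails with EINVAL, as documented, instead of appearing to succeed with the bit silently dropped. A minimal userspace sketch of that handshake (error handling trimmed; this mirrors the standard userfaultfd API, not code from this commit):

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_uffd(unsigned long long wanted_features)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = wanted_features,	/* e.g. UFFD_FEATURE_WP_ASYNC */
	};

	if (uffd < 0)
		return -1;
	/* After this fix: fails with EINVAL if any wanted feature bit is
	 * not in the set the kernel can actually provide. */
	if (ioctl(uffd, UFFDIO_API, &api) < 0) {
		close(uffd);
		return -1;
	}
	return uffd;
}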

include/linux/mmzone.h

Lines changed: 2 additions & 1 deletion

@@ -1979,8 +1979,9 @@ static inline int subsection_map_index(unsigned long pfn)
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
 	int idx = subsection_map_index(pfn);
+	struct mem_section_usage *usage = READ_ONCE(ms->usage);

-	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+	return usage ? test_bit(idx, usage->subsection_map) : 0;
 }
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
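
The pattern behind this fix: ms->usage can be freed and cleared by memory hot-remove while a speculative PFN walker is mid-check, so the pointer is sampled once and the local copy is NULL-tested before use; the old code dereferenced the READ_ONCE() result without that check. A hedged userspace stand-in for the idiom, with C11 atomics in place of READ_ONCE() and illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

struct usage_map { unsigned long subsection_map; };

static _Atomic(struct usage_map *) shared_usage; /* remover may NULL this */

static bool subsection_bit_valid(int idx)
{
	struct usage_map *usage =
		atomic_load_explicit(&shared_usage, memory_order_relaxed);

	/* Test the snapshot, never the shared field a second time; the
	 * original bug dereferenced the loaded pointer without checking
	 * whether a concurrent remover had already freed it. */
	return usage ? (usage->subsection_map >> idx) & 1 : false;
}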

include/linux/page_ref.h

Lines changed: 9 additions & 48 deletions

@@ -230,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio)

 static inline bool page_ref_add_unless(struct page *page, int nr, int u)
 {
-	bool ret = atomic_add_unless(&page->_refcount, nr, u);
+	bool ret = false;
+
+	rcu_read_lock();
+	/* avoid writing to the vmemmap area being remapped */
+	if (!page_is_fake_head(page) && page_ref_count(page) != u)
+		ret = atomic_add_unless(&page->_refcount, nr, u);
+	rcu_read_unlock();

 	if (page_ref_tracepoint_active(page_ref_mod_unless))
 		__page_ref_mod_unless(page, nr, ret);
@@ -258,54 +264,9 @@ static inline bool folio_try_get(struct folio *folio)
 	return folio_ref_add_unless(folio, 1, 0);
 }

-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
-{
-#ifdef CONFIG_TINY_RCU
-	/*
-	 * The caller guarantees the folio will not be freed from interrupt
-	 * context, so (on !SMP) we only need preemption to be disabled
-	 * and TINY_RCU does that for us.
-	 */
-# ifdef CONFIG_PREEMPT_COUNT
-	VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
-	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
-	folio_ref_add(folio, count);
-#else
-	if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
-		/* Either the folio has been freed, or will be freed. */
-		return false;
-	}
-#endif
-	return true;
-}
-
-/**
- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
- * @folio: The folio.
- *
- * This is a version of folio_try_get() optimised for non-SMP kernels.
- * If you are still holding the rcu_read_lock() after looking up the
- * page and know that the page cannot have its refcount decreased to
- * zero in interrupt context, you can use this instead of folio_try_get().
- *
- * Example users include get_user_pages_fast() (as pages are not unmapped
- * from interrupt context) and the page cache lookups (as pages are not
- * truncated from interrupt context). We also know that pages are not
- * frozen in interrupt context for the purposes of splitting or migration.
- *
- * You can also use this function if you're holding a lock that prevents
- * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_lock or page table lock for page tables. In this case,
- * it will always succeed, and you could have used a plain folio_get(),
- * but it's sometimes more convenient to have a common function called
- * from both locked and RCU-protected contexts.
- *
- * Return: True if the reference count was successfully incremented.
- */
-static inline bool folio_try_get_rcu(struct folio *folio)
+static inline bool folio_ref_try_add(struct folio *folio, int count)
 {
-	return folio_ref_try_add_rcu(folio, 1);
+	return folio_ref_add_unless(folio, count, 0);
 }

 static inline int page_ref_freeze(struct page *page, int count)
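
With folio_try_get_rcu() removed, callers use plain folio_try_get(), and page_ref_add_unless() itself now runs under rcu_read_lock() and refuses vmemmap "fake head" pages, which is what closes the race with hugetlb_vmemmap remapping. The surviving idiom looks roughly like the speculative lookup below; this is a simplified kernel-style sketch (the real page-cache lookup also handles shadow/value entries), not code from this commit:

#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

static struct folio *lookup_and_get(struct xarray *xa, unsigned long index)
{
	struct folio *folio;

	rcu_read_lock();
	folio = xa_load(xa, index);
	/* The refcount may already have dropped to zero under us. */
	if (folio && !folio_try_get(folio))
		folio = NULL;
	/* Re-check: the folio may have been removed and reused. */
	if (folio && xa_load(xa, index) != folio) {
		folio_put(folio);
		folio = NULL;
	}
	rcu_read_unlock();
	return folio;
}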

include/linux/pagemap.h

Lines changed: 9 additions & 2 deletions

@@ -354,11 +354,18 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
  * a good order (that's 1MB if you're using 4kB pages)
  */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
+#define PREFERRED_MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
 #else
-#define MAX_PAGECACHE_ORDER	8
+#define PREFERRED_MAX_PAGECACHE_ORDER	8
 #endif

+/*
+ * xas_split_alloc() does not support arbitrary orders. This implies no
+ * 512MB THP on ARM64 with 64KB base page size.
+ */
+#define MAX_XAS_ORDER		(XA_CHUNK_SHIFT * 2 - 1)
+#define MAX_PAGECACHE_ORDER	min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+
 /**
  * mapping_set_large_folios() - Indicate the file supports large folios.
  * @mapping: The file.
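
A worked example of the new clamp, assuming the default XA_CHUNK_SHIFT of 6 and an arm64 kernel with 64KB base pages, where HPAGE_PMD_ORDER is 13 (2^13 pages of 64KB = 512MB); both values are configuration-dependent:

/* The same arithmetic, spelled out with the assumed values above: */
#define XA_CHUNK_SHIFT	6
#define MAX_XAS_ORDER	(XA_CHUNK_SHIFT * 2 - 1)	/* = 11 */
#define HPAGE_PMD_ORDER	13				/* 512MB / 64KB */
/* MAX_PAGECACHE_ORDER = min(11, 13) = 11, so the largest page-cache
 * folio is (1 << 11) * 64KB = 128MB; the 512MB PMD size, which
 * xas_split_alloc() cannot split, is no longer used. */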

include/linux/sched.h

Lines changed: 2 additions & 2 deletions

@@ -2192,13 +2192,13 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);

 #ifdef CONFIG_MEM_ALLOC_PROFILING
-static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
+static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
 {
 	swap(current->alloc_tag, tag);
 	return tag;
 }

-static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
+static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
 {
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 	WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
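
For context, these helpers bracket an allocation so it is charged to a given tag: alloc_tag_save() swaps the tag into current->alloc_tag and returns the previous one, and alloc_tag_restore() swaps it back; forcing them __always_inline is what fixes the modpost warnings named in the merge message. A hedged sketch of the bracket (simplified from what the allocation-hook machinery expands to; `my_tag` is illustrative):

#include <linux/sched.h>
#include <linux/slab.h>

static void *tagged_alloc(struct alloc_tag *my_tag, size_t size)
{
	/* Swap my_tag into current->alloc_tag, remembering the old one. */
	struct alloc_tag *old = alloc_tag_save(my_tag);
	void *p = kmalloc(size, GFP_KERNEL);	/* charged to my_tag */

	alloc_tag_restore(my_tag, old);		/* put the old tag back */
	return p;
}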
