Skip to content

Commit 91325f3

Browse files
committed
Merge tag 'mm-hotfixes-stable-2025-08-12-20-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
"12 hotfixes. 5 are cc:stable and the remainder address post-6.16 issues or aren't considered necessary for -stable kernels. 10 of these fixes are for MM"

* tag 'mm-hotfixes-stable-2025-08-12-20-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  proc: proc_maps_open allow proc_mem_open to return NULL
  mm/mremap: avoid expensive folio lookup on mremap folio pte batch
  userfaultfd: fix a crash in UFFDIO_MOVE when PMD is a migration entry
  mm: pass page directly instead of using folio_page
  selftests/proc: fix string literal warning in proc-maps-race.c
  fs/proc/task_mmu: hold PTL in pagemap_hugetlb_range and gather_hugetlb_stats
  mm/smaps: fix race between smaps_hugetlb_range and migration
  mm: fix the race between collapse and PT_RECLAIM under per-vma lock
  mm/kmemleak: avoid soft lockup in __kmemleak_do_cleanup()
  MAINTAINERS: add Masami as a reviewer of hung task detector
  mm/kmemleak: avoid deadlock by moving pr_warn() outside kmemleak_lock
  kasan/test: fix protection against compiler elision
2 parents 8742b2d + c0e1b77 commit 91325f3

File tree

9 files changed

+57
-32
lines changed

9 files changed

+57
-32
lines changed

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11438,6 +11438,7 @@ F: drivers/tty/hvc/
1143811438
HUNG TASK DETECTOR
1143911439
M: Andrew Morton <[email protected]>
1144011440
R: Lance Yang <[email protected]>
11441+
R: Masami Hiramatsu <[email protected]>
1144111442
1144211443
S: Maintained
1144311444
F: include/linux/hung_task.h

fs/proc/task_mmu.c

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -340,8 +340,8 @@ static int proc_maps_open(struct inode *inode, struct file *file,
340340

341341
priv->inode = inode;
342342
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
343-
if (IS_ERR_OR_NULL(priv->mm)) {
344-
int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
343+
if (IS_ERR(priv->mm)) {
344+
int err = PTR_ERR(priv->mm);
345345

346346
seq_release_private(inode, file);
347347
return err;
@@ -1148,10 +1148,13 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
11481148
{
11491149
struct mem_size_stats *mss = walk->private;
11501150
struct vm_area_struct *vma = walk->vma;
1151-
pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
11521151
struct folio *folio = NULL;
11531152
bool present = false;
1153+
spinlock_t *ptl;
1154+
pte_t ptent;
11541155

1156+
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
1157+
ptent = huge_ptep_get(walk->mm, addr, pte);
11551158
if (pte_present(ptent)) {
11561159
folio = page_folio(pte_page(ptent));
11571160
present = true;
@@ -1170,6 +1173,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
11701173
else
11711174
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
11721175
}
1176+
spin_unlock(ptl);
11731177
return 0;
11741178
}
11751179
#else
@@ -2017,12 +2021,14 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
20172021
struct pagemapread *pm = walk->private;
20182022
struct vm_area_struct *vma = walk->vma;
20192023
u64 flags = 0, frame = 0;
2024+
spinlock_t *ptl;
20202025
int err = 0;
20212026
pte_t pte;
20222027

20232028
if (vma->vm_flags & VM_SOFTDIRTY)
20242029
flags |= PM_SOFT_DIRTY;
20252030

2031+
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
20262032
pte = huge_ptep_get(walk->mm, addr, ptep);
20272033
if (pte_present(pte)) {
20282034
struct folio *folio = page_folio(pte_page(pte));
@@ -2050,11 +2056,12 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
20502056

20512057
err = add_to_pagemap(&pme, pm);
20522058
if (err)
2053-
return err;
2059+
break;
20542060
if (pm->show_pfn && (flags & PM_PRESENT))
20552061
frame++;
20562062
}
20572063

2064+
spin_unlock(ptl);
20582065
cond_resched();
20592066

20602067
return err;
@@ -3128,17 +3135,22 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
31283135
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
31293136
unsigned long addr, unsigned long end, struct mm_walk *walk)
31303137
{
3131-
pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
3138+
pte_t huge_pte;
31323139
struct numa_maps *md;
31333140
struct page *page;
3141+
spinlock_t *ptl;
31343142

3143+
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
3144+
huge_pte = huge_ptep_get(walk->mm, addr, pte);
31353145
if (!pte_present(huge_pte))
3136-
return 0;
3146+
goto out;
31373147

31383148
page = pte_page(huge_pte);
31393149

31403150
md = walk->private;
31413151
gather_stats(page, md, pte_dirty(huge_pte), 1);
3152+
out:
3153+
spin_unlock(ptl);
31423154
return 0;
31433155
}
31443156

mm/kasan/kasan_test_c.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ static struct {
4747
* Some tests use these global variables to store return values from function
4848
* calls that could otherwise be eliminated by the compiler as dead code.
4949
*/
50-
static volatile void *kasan_ptr_result;
50+
static void *volatile kasan_ptr_result;
5151
static volatile int kasan_int_result;
5252

5353
/* Probe for console output: obtains test_status lines of interest. */

mm/khugepaged.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1172,11 +1172,11 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
11721172
if (result != SCAN_SUCCEED)
11731173
goto out_up_write;
11741174
/* check if the pmd is still valid */
1175+
vma_start_write(vma);
11751176
result = check_pmd_still_valid(mm, address, pmd);
11761177
if (result != SCAN_SUCCEED)
11771178
goto out_up_write;
11781179

1179-
vma_start_write(vma);
11801180
anon_vma_lock_write(vma->anon_vma);
11811181

11821182
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,

mm/kmemleak.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -470,6 +470,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
470470
{
471471
unsigned long flags;
472472
struct kmemleak_object *object;
473+
bool warn = false;
473474

474475
/* try the slab allocator first */
475476
if (object_cache) {
@@ -488,8 +489,10 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
488489
else if (mem_pool_free_count)
489490
object = &mem_pool[--mem_pool_free_count];
490491
else
491-
pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
492+
warn = true;
492493
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
494+
if (warn)
495+
pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
493496

494497
return object;
495498
}
@@ -2181,6 +2184,7 @@ static const struct file_operations kmemleak_fops = {
21812184
static void __kmemleak_do_cleanup(void)
21822185
{
21832186
struct kmemleak_object *object, *tmp;
2187+
unsigned int cnt = 0;
21842188

21852189
/*
21862190
* Kmemleak has already been disabled, no need for RCU list traversal
@@ -2189,6 +2193,10 @@ static void __kmemleak_do_cleanup(void)
21892193
list_for_each_entry_safe(object, tmp, &object_list, object_list) {
21902194
__remove_object(object);
21912195
__delete_object(object);
2196+
2197+
/* Call cond_resched() once per 64 iterations to avoid soft lockup */
2198+
if (!(++cnt & 0x3f))
2199+
cond_resched();
21922200
}
21932201
}
21942202

mm/mprotect.c

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -120,9 +120,8 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
120120

121121
static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
122122
pte_t oldpte, pte_t *pte, int target_node,
123-
struct folio **foliop)
123+
struct folio *folio)
124124
{
125-
struct folio *folio = NULL;
126125
bool ret = true;
127126
bool toptier;
128127
int nid;
@@ -131,7 +130,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
131130
if (pte_protnone(oldpte))
132131
goto skip;
133132

134-
folio = vm_normal_folio(vma, addr, oldpte);
135133
if (!folio)
136134
goto skip;
137135

@@ -173,7 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
173171
folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
174172

175173
skip:
176-
*foliop = folio;
177174
return ret;
178175
}
179176

@@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
231228
* retrieve sub-batches.
232229
*/
233230
static void commit_anon_folio_batch(struct vm_area_struct *vma,
234-
struct folio *folio, unsigned long addr, pte_t *ptep,
231+
struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
235232
pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
236233
{
237-
struct page *first_page = folio_page(folio, 0);
238234
bool expected_anon_exclusive;
239235
int sub_batch_idx = 0;
240236
int len;
@@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
251247
}
252248

253249
static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
254-
struct folio *folio, unsigned long addr, pte_t *ptep,
250+
struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
255251
pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
256252
{
257253
bool set_write;
@@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
270266
/* idx = */ 0, set_write, tlb);
271267
return;
272268
}
273-
commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb);
269+
commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
274270
}
275271

276272
static long change_pte_range(struct mmu_gather *tlb,
@@ -305,15 +301,19 @@ static long change_pte_range(struct mmu_gather *tlb,
305301
const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
306302
int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
307303
struct folio *folio = NULL;
304+
struct page *page;
308305
pte_t ptent;
309306

307+
page = vm_normal_page(vma, addr, oldpte);
308+
if (page)
309+
folio = page_folio(page);
310310
/*
311311
* Avoid trapping faults against the zero or KSM
312312
* pages. See similar comment in change_huge_pmd.
313313
*/
314314
if (prot_numa) {
315315
int ret = prot_numa_skip(vma, addr, oldpte, pte,
316-
target_node, &folio);
316+
target_node, folio);
317317
if (ret) {
318318

319319
/* determine batch to skip */
@@ -323,9 +323,6 @@ static long change_pte_range(struct mmu_gather *tlb,
323323
}
324324
}
325325

326-
if (!folio)
327-
folio = vm_normal_folio(vma, addr, oldpte);
328-
329326
nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
330327

331328
oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
@@ -351,7 +348,7 @@ static long change_pte_range(struct mmu_gather *tlb,
351348
*/
352349
if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
353350
!pte_write(ptent))
354-
set_write_prot_commit_flush_ptes(vma, folio,
351+
set_write_prot_commit_flush_ptes(vma, folio, page,
355352
addr, pte, oldpte, ptent, nr_ptes, tlb);
356353
else
357354
prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,

mm/mremap.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -179,6 +179,10 @@ static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr
179179
if (max_nr == 1)
180180
return 1;
181181

182+
/* Avoid expensive folio lookup if we stand no chance of benefit. */
183+
if (pte_batch_hint(ptep, pte) == 1)
184+
return 1;
185+
182186
folio = vm_normal_folio(vma, addr, pte);
183187
if (!folio || !folio_test_large(folio))
184188
return 1;

mm/userfaultfd.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1821,13 +1821,16 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
18211821
/* Check if we can move the pmd without splitting it. */
18221822
if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
18231823
!pmd_none(dst_pmdval)) {
1824-
struct folio *folio = pmd_folio(*src_pmd);
1825-
1826-
if (!folio || (!is_huge_zero_folio(folio) &&
1827-
!PageAnonExclusive(&folio->page))) {
1828-
spin_unlock(ptl);
1829-
err = -EBUSY;
1830-
break;
1824+
/* Can be a migration entry */
1825+
if (pmd_present(*src_pmd)) {
1826+
struct folio *folio = pmd_folio(*src_pmd);
1827+
1828+
if (!is_huge_zero_folio(folio) &&
1829+
!PageAnonExclusive(&folio->page)) {
1830+
spin_unlock(ptl);
1831+
err = -EBUSY;
1832+
break;
1833+
}
18311834
}
18321835

18331836
spin_unlock(ptl);

tools/testing/selftests/proc/proc-maps-race.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -202,11 +202,11 @@ static void print_first_lines(char *text, int nr)
202202
int offs = end - text;
203203

204204
text[offs] = '\0';
205-
printf(text);
205+
printf("%s", text);
206206
text[offs] = '\n';
207207
printf("\n");
208208
} else {
209-
printf(text);
209+
printf("%s", text);
210210
}
211211
}
212212

@@ -221,7 +221,7 @@ static void print_last_lines(char *text, int nr)
221221
nr--;
222222
start--;
223223
}
224-
printf(start);
224+
printf("%s", start);
225225
}
226226

227227
static void print_boundaries(const char *title, FIXTURE_DATA(proc_maps_race) *self)

0 commit comments

Comments (0)