Skip to content

Commit a32e7ea

Browse files
committed
Merge tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache
Pull folio fixes from Matthew Wilcox:
 "Four folio-related fixes:
   - Don't release a folio while it's still locked
   - Fix a use-after-free after dropping the mmap_lock
   - Fix a memory leak when splitting a page
   - Fix a kernel-doc warning for struct folio"

* tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache:
  mm: Add kernel-doc for folio->mlock_count
  mm/huge_memory: Fix xarray node memory leak
  filemap: Cache the value of vm_flags
  filemap: Don't release a locked folio
2 parents aa3398f + 334f6f5 commit a32e7ea

File tree

6 files changed

+17
-8
lines changed

6 files changed

+17
-8
lines changed

include/linux/mm_types.h

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -227,6 +227,7 @@ struct page {
227227
* struct folio - Represents a contiguous set of bytes.
228228
* @flags: Identical to the page flags.
229229
* @lru: Least Recently Used list; tracks how recently this folio was used.
230+
* @mlock_count: Number of times this folio has been pinned by mlock().
230231
* @mapping: The file this page belongs to, or refers to the anon_vma for
231232
* anonymous memory.
232233
* @index: Offset within the file, in units of pages. For anonymous memory,
@@ -255,10 +256,14 @@ struct folio {
255256
unsigned long flags;
256257
union {
257258
struct list_head lru;
259+
/* private: avoid cluttering the output */
258260
struct {
259261
void *__filler;
262+
/* public: */
260263
unsigned int mlock_count;
264+
/* private: */
261265
};
266+
/* public: */
262267
};
263268
struct address_space *mapping;
264269
pgoff_t index;

include/linux/xarray.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1508,6 +1508,7 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
15081508
void xas_init_marks(const struct xa_state *);
15091509

15101510
bool xas_nomem(struct xa_state *, gfp_t);
1511+
void xas_destroy(struct xa_state *);
15111512
void xas_pause(struct xa_state *);
15121513

15131514
void xas_create_range(struct xa_state *);

lib/xarray.c

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node)
264264
* xas_destroy() - Free any resources allocated during the XArray operation.
265265
* @xas: XArray operation state.
266266
*
267-
* This function is now internal-only.
267+
* Most users will not need to call this function; it is called for you
268+
* by xas_nomem().
268269
*/
269-
static void xas_destroy(struct xa_state *xas)
270+
void xas_destroy(struct xa_state *xas)
270271
{
271272
struct xa_node *next, *node = xas->xa_alloc;
272273

mm/filemap.c

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -2991,19 +2991,20 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
29912991
struct address_space *mapping = file->f_mapping;
29922992
DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
29932993
struct file *fpin = NULL;
2994+
unsigned long vm_flags = vmf->vma->vm_flags;
29942995
unsigned int mmap_miss;
29952996

29962997
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
29972998
/* Use the readahead code, even if readahead is disabled */
2998-
if (vmf->vma->vm_flags & VM_HUGEPAGE) {
2999+
if (vm_flags & VM_HUGEPAGE) {
29993000
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
30003001
ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
30013002
ra->size = HPAGE_PMD_NR;
30023003
/*
30033004
* Fetch two PMD folios, so we get the chance to actually
30043005
* readahead, unless we've been told not to.
30053006
*/
3006-
if (!(vmf->vma->vm_flags & VM_RAND_READ))
3007+
if (!(vm_flags & VM_RAND_READ))
30073008
ra->size *= 2;
30083009
ra->async_size = HPAGE_PMD_NR;
30093010
page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
30123013
#endif
30133014

30143015
/* If we don't want any read-ahead, don't bother */
3015-
if (vmf->vma->vm_flags & VM_RAND_READ)
3016+
if (vm_flags & VM_RAND_READ)
30163017
return fpin;
30173018
if (!ra->ra_pages)
30183019
return fpin;
30193020

3020-
if (vmf->vma->vm_flags & VM_SEQ_READ) {
3021+
if (vm_flags & VM_SEQ_READ) {
30213022
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
30223023
page_cache_sync_ra(&ractl, ra->ra_pages);
30233024
return fpin;

mm/huge_memory.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2672,8 +2672,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
26722672
if (mapping)
26732673
i_mmap_unlock_read(mapping);
26742674
out:
2675-
/* Free any memory we didn't use */
2676-
xas_nomem(&xas, 0);
2675+
xas_destroy(&xas);
26772676
count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
26782677
return ret;
26792678
}

mm/readahead.c

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -164,12 +164,14 @@ static void read_pages(struct readahead_control *rac)
164164
while ((folio = readahead_folio(rac)) != NULL) {
165165
unsigned long nr = folio_nr_pages(folio);
166166

167+
folio_get(folio);
167168
rac->ra->size -= nr;
168169
if (rac->ra->async_size >= nr) {
169170
rac->ra->async_size -= nr;
170171
filemap_remove_folio(folio);
171172
}
172173
folio_unlock(folio);
174+
folio_put(folio);
173175
}
174176
} else {
175177
while ((folio = readahead_folio(rac)) != NULL)

0 commit comments

Comments
 (0)