
Commit 79c1374

Xu Yang authored and brauner committed
filemap: add helper mapping_max_folio_size()
Add mapping_max_folio_size() to get the maximum folio size for this
pagecache mapping.

Fixes: 5d8edfb ("iomap: Copy larger chunks from userspace")
Cc: [email protected]
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Xu Yang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Reviewed-by: Ritesh Harjani (IBM) <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
1 parent 2c6b531 commit 79c1374

File tree

1 file changed (+21 -13 lines)

include/linux/pagemap.h

Lines changed: 21 additions & 13 deletions
@@ -346,6 +346,19 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 	m->gfp_mask = mask;
 }
 
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER	8
+#endif
+
 /**
  * mapping_set_large_folios() - Indicate the file supports large folios.
  * @mapping: The file.
@@ -372,6 +385,14 @@ static inline bool mapping_large_folio_support(struct address_space *mapping)
 		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
 }
 
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(struct address_space *mapping)
+{
+	if (mapping_large_folio_support(mapping))
+		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
+	return PAGE_SIZE;
+}
+
 static inline int filemap_nr_thps(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -530,19 +551,6 @@ static inline void *detach_page_private(struct page *page)
 	return folio_detach_private(page_folio(page));
 }
 
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER	8
-#endif
-
 #ifdef CONFIG_NUMA
 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
 #else
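
For illustration only (not part of this commit): a minimal sketch of how a buffered-write loop, such as the one touched by the commit referenced in the Fixes tag, might use the new helper to bound how many bytes are copied from userspace per iteration. The function example_write_chunk() and its signature are hypothetical; only mapping_max_folio_size() comes from this patch.

/* Illustrative sketch only -- assumes <linux/pagemap.h> is included. */
static size_t example_write_chunk(struct address_space *mapping,
				  loff_t pos, size_t remaining)
{
	/* Largest folio this mapping can hold, in bytes. */
	size_t max_size = mapping_max_folio_size(mapping);
	/* Offset of pos within a max_size-sized, max_size-aligned window. */
	size_t offset = pos & (max_size - 1);

	/* Never copy past the end of the window that contains pos. */
	return min(remaining, max_size - offset);
}

Note on the values involved: with large folios supported and CONFIG_TRANSPARENT_HUGEPAGE enabled on x86-64 with 4 KiB pages, HPAGE_PMD_ORDER is 9, so the helper returns 2 MiB; with THP disabled it returns PAGE_SIZE << 8 = 1 MiB, matching the "1MB" noted in the comment. Without large folio support it simply returns PAGE_SIZE.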
