Commit e220917

mcgrof authored and brauner committed

mm: split a folio in minimum folio order chunks
split_folio() and split_folio_to_list() assume order 0. To support minorder
for non-anonymous folios, we must expand these to check the folio mapping
order and use that.

Set new_order to be at least the minimum folio order if it is set in
split_huge_page_to_list() so that we can maintain the minimum folio order
requirement in the page cache.

Update the debugfs write files used for testing to ensure the order is
respected as well. We simply enforce the min order when a file mapping is
used.

Signed-off-by: Luis Chamberlain <[email protected]>
Signed-off-by: Pankaj Raghav <[email protected]>
Link: https://lore.kernel.org/r/[email protected] # folded fix
Link: https://lore.kernel.org/r/[email protected]
Tested-by: David Howells <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
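The policy the message describes is small enough to sketch. A minimal
illustration, not kernel code: effective_split_order() is a hypothetical
helper, and min_order stands in for mapping_min_folio_order(folio->mapping).
Direct split requests below the mapping's minimum order fail with -EINVAL,
while the debugfs test paths clamp the request up to the minimum instead.

	/*
	 * Sketch only: the clamping rule this commit enforces for
	 * file-backed folios. effective_split_order() is a hypothetical
	 * helper, not kernel API.
	 */
	static int effective_split_order(unsigned int new_order,
					 unsigned int min_order, bool clamp)
	{
		if (new_order >= min_order)
			return new_order;
		if (clamp)		/* debugfs test paths */
			return min_order;
		return -EINVAL;		/* split_huge_page_to_list_to_order() */
	}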
1 parent 26cfdb3 commit e220917

File tree

2 files changed: +85 -8 lines changed

include/linux/huge_mm.h (24 additions, 4 deletions)

@@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
 #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
 	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
 
+#define split_folio(f) split_folio_to_list(f, NULL)
+
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 #define HPAGE_PMD_SHIFT PMD_SHIFT
 #define HPAGE_PUD_SHIFT PUD_SHIFT
@@ -317,9 +319,24 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
 static inline int split_huge_page(struct page *page)
 {
-	return split_huge_page_to_list_to_order(page, NULL, 0);
+	struct folio *folio = page_folio(page);
+	int ret = min_order_for_split(folio);
+
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * split_huge_page() locks the page before splitting and
+	 * expects the same page that has been split to be locked when
+	 * returned. split_folio(page_folio(page)) cannot be used here
+	 * because it converts the page to folio and passes the head
+	 * page to be split.
+	 */
+	return split_huge_page_to_list_to_order(page, NULL, ret);
 }
 void deferred_split_folio(struct folio *folio);
 
@@ -484,6 +501,12 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	return 0;
+}
+
 static inline void deferred_split_folio(struct folio *folio) {}
 #define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
@@ -598,7 +621,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
 	return split_folio_to_list_to_order(folio, NULL, new_order);
 }
 
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
 #endif /* _LINUX_HUGE_MM_H */
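
With split_folio() now routed through split_folio_to_list(), callers of the
generic helpers no longer need to compute the minimum order themselves. A
minimal usage sketch under that assumption (try_split_file_folio() is a
hypothetical caller, not part of this commit; as before, the folio must
already be locked):

	/* Hypothetical caller, to show the new semantics of split_folio(). */
	static int try_split_file_folio(struct folio *folio)
	{
		int ret;

		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

		/*
		 * For a file-backed folio this now splits down to the
		 * mapping's minimum order rather than to order 0.
		 */
		ret = split_folio(folio);
		if (ret < 0)
			return ret;	/* e.g. -EBUSY if the mapping is gone */

		return 0;
	}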

mm/huge_memory.c (61 additions, 4 deletions)

@@ -3082,6 +3082,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
  * released, or if some unexpected race happened (e.g., anon VMA disappeared,
  * truncation).
  *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set for non-anonymous folios.
+ *
  * Returns -EINVAL when trying to split to an order that is incompatible
  * with the folio. Splitting to order 0 is compatible with all folios.
  */
@@ -3163,6 +3166,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		mapping = NULL;
 		anon_vma_lock_write(anon_vma);
 	} else {
+		unsigned int min_order;
 		gfp_t gfp;
 
 		mapping = folio->mapping;
@@ -3173,6 +3177,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 			goto out;
 		}
 
+		min_order = mapping_min_folio_order(folio->mapping);
+		if (new_order < min_order) {
+			VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+				     min_order);
+			ret = -EINVAL;
+			goto out;
+		}
+
 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
 							GFP_RECLAIM_MASK);
 
@@ -3285,6 +3297,30 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	return ret;
 }
 
+int min_order_for_split(struct folio *folio)
+{
+	if (folio_test_anon(folio))
+		return 0;
+
+	if (!folio->mapping) {
+		if (folio_test_pmd_mappable(folio))
+			count_vm_event(THP_SPLIT_PAGE_FAILED);
+		return -EBUSY;
+	}
+
+	return mapping_min_folio_order(folio->mapping);
+}
+
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	int ret = min_order_for_split(folio);
+
+	if (ret < 0)
+		return ret;
+
+	return split_huge_page_to_list_to_order(&folio->page, list, ret);
+}
+
 void __folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
@@ -3515,6 +3551,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		struct vm_area_struct *vma = vma_lookup(mm, addr);
 		struct page *page;
 		struct folio *folio;
+		struct address_space *mapping;
+		unsigned int target_order = new_order;
 
 		if (!vma)
 			break;
@@ -3535,7 +3573,13 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		if (!is_transparent_hugepage(folio))
 			goto next;
 
-		if (new_order >= folio_order(folio))
+		if (!folio_test_anon(folio)) {
+			mapping = folio->mapping;
+			target_order = max(new_order,
+					   mapping_min_folio_order(mapping));
+		}
+
+		if (target_order >= folio_order(folio))
 			goto next;
 
 		total++;
@@ -3551,9 +3595,14 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		if (!folio_trylock(folio))
 			goto next;
 
-		if (!split_folio_to_order(folio, new_order))
+		if (!folio_test_anon(folio) && folio->mapping != mapping)
+			goto unlock;
+
+		if (!split_folio_to_order(folio, target_order))
 			split++;
 
+unlock:
+
 		folio_unlock(folio);
 next:
 		folio_put(folio);
@@ -3578,6 +3627,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 	pgoff_t index;
 	int nr_pages = 1;
 	unsigned long total = 0, split = 0;
+	unsigned int min_order;
+	unsigned int target_order;
 
 	file = getname_kernel(file_path);
 	if (IS_ERR(file))
@@ -3591,6 +3642,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 		 file_path, off_start, off_end);
 
 	mapping = candidate->f_mapping;
+	min_order = mapping_min_folio_order(mapping);
+	target_order = max(new_order, min_order);
 
 	for (index = off_start; index < off_end; index += nr_pages) {
 		struct folio *folio = filemap_get_folio(mapping, index);
@@ -3605,15 +3658,19 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 		total++;
 		nr_pages = folio_nr_pages(folio);
 
-		if (new_order >= folio_order(folio))
+		if (target_order >= folio_order(folio))
 			goto next;
 
 		if (!folio_trylock(folio))
 			goto next;
 
-		if (!split_folio_to_order(folio, new_order))
+		if (folio->mapping != mapping)
+			goto unlock;
+
+		if (!split_folio_to_order(folio, target_order))
 			split++;
 
+unlock:
 		folio_unlock(folio);
 next:
 		folio_put(folio);
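
Both debugfs paths now compute the target as max(new_order,
mapping_min_folio_order(...)), so a test request below a file mapping's
minimum order is silently raised rather than rejected. As a usage sketch,
assuming the existing split_huge_pages debugfs write format and a
hypothetical file at /mnt/test/file, a write such as

	echo '/mnt/test/file,0,256' > /sys/kernel/debug/split_huge_pages

would split folios in that page-cache offset range down to the mapping's
minimum order, not to order 0.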
