@@ -3082,6 +3082,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
  * released, or if some unexpected race happened (e.g., anon VMA disappeared,
  * truncation).
  *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set for non-anonymous folios.
+ *
  * Returns -EINVAL when trying to split to an order that is incompatible
  * with the folio. Splitting to order 0 is compatible with all folios.
  */
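The new paragraph in the kerneldoc above states a caller-side contract; the enforcement itself lands in split_huge_page_to_list_to_order() further down. A minimal sketch of how a caller might honor the contract (the helper name is hypothetical, and it assumes the folio is locked with a reference held, as the split path requires):

/*
 * Hypothetical caller: clamp the requested order to the mapping's
 * minimum before splitting. Anonymous folios have no mapping-imposed
 * minimum, so any order down to 0 is acceptable for them.
 */
static int split_clamped(struct folio *folio, unsigned int new_order)
{
	if (!folio_test_anon(folio))
		new_order = max(new_order,
				mapping_min_folio_order(folio->mapping));

	/* Requests below min-order would now fail with -EINVAL. */
	return split_huge_page_to_list_to_order(&folio->page, NULL, new_order);
}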
@@ -3163,6 +3166,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		mapping = NULL;
 		anon_vma_lock_write(anon_vma);
 	} else {
+		unsigned int min_order;
 		gfp_t gfp;

 		mapping = folio->mapping;
@@ -3173,6 +3177,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 			goto out;
 		}

+		min_order = mapping_min_folio_order(folio->mapping);
+		if (new_order < min_order) {
+			VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+				     min_order);
+			ret = -EINVAL;
+			goto out;
+		}
+
 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
 					  GFP_RECLAIM_MASK);

@@ -3285,6 +3297,30 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	return ret;
 }

+int min_order_for_split(struct folio *folio)
+{
+	if (folio_test_anon(folio))
+		return 0;
+
+	if (!folio->mapping) {
+		if (folio_test_pmd_mappable(folio))
+			count_vm_event(THP_SPLIT_PAGE_FAILED);
+		return -EBUSY;
+	}
+
+	return mapping_min_folio_order(folio->mapping);
+}
+
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	int ret = min_order_for_split(folio);
+
+	if (ret < 0)
+		return ret;
+
+	return split_huge_page_to_list_to_order(&folio->page, list, ret);
+}
+
 void __folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
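Of the two helpers just added, min_order_for_split() answers "what is the smallest legal split order for this folio right now", returning -EBUSY when the mapping has already been torn down by truncation (and bumping THP_SPLIT_PAGE_FAILED for PMD-mappable folios, matching the accounting of the equivalent failure path inside split_huge_page_to_list_to_order()). split_folio_to_list() is the convenience wrapper for callers that simply want the smallest legal pieces. A hedged sketch combining the two, for a hypothetical caller that wants a specific order but must not undercut the mapping (assumes the folio is locked with a reference held):

/*
 * Sketch: try to split @folio to @want_order, falling back to the
 * smallest order the mapping permits when @want_order undercuts it.
 */
static int split_to_order_or_min(struct folio *folio,
				 struct list_head *list,
				 unsigned int want_order)
{
	int min_order = min_order_for_split(folio);

	if (min_order < 0)
		return min_order;	/* -EBUSY: truncated under us */

	return split_huge_page_to_list_to_order(&folio->page, list,
						max_t(unsigned int, want_order,
						      min_order));
}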
@@ -3515,6 +3551,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		struct vm_area_struct *vma = vma_lookup(mm, addr);
 		struct page *page;
 		struct folio *folio;
+		struct address_space *mapping;
+		unsigned int target_order = new_order;

 		if (!vma)
 			break;
@@ -3535,7 +3573,13 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		if (!is_transparent_hugepage(folio))
 			goto next;

-		if (new_order >= folio_order(folio))
+		if (!folio_test_anon(folio)) {
+			mapping = folio->mapping;
+			target_order = max(new_order,
+					   mapping_min_folio_order(mapping));
+		}
+
+		if (target_order >= folio_order(folio))
 			goto next;

 		total++;
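Note the design choice in this pid-based debugfs path: an undersized request is clamped with max() rather than rejected, so writing a new_order below a mapping's minimum still splits the file-backed folio, just to the mapping's min-order instead of the requested one; only folios that cannot shrink at all (target_order >= folio_order()) are skipped. The clamp is recomputed per folio because each folio met during the VMA walk may belong to a different mapping.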
@@ -3551,9 +3595,14 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		if (!folio_trylock(folio))
 			goto next;

-		if (!split_folio_to_order(folio, new_order))
+		if (!folio_test_anon(folio) && folio->mapping != mapping)
+			goto unlock;
+
+		if (!split_folio_to_order(folio, target_order))
 			split++;

+unlock:
+
 		folio_unlock(folio);
 next:
 		folio_put(folio);
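The folio->mapping != mapping recheck above is a revalidation, not a duplicate: mapping was sampled before folio_trylock() succeeded, so the folio could have been truncated in between, leaving folio->mapping NULL or changed. Re-reading it under the folio lock and bailing out through the new unlock label keeps split_folio_to_order() from running with a target_order derived from a stale mapping.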
@@ -3578,6 +3627,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 	pgoff_t index;
 	int nr_pages = 1;
 	unsigned long total = 0, split = 0;
+	unsigned int min_order;
+	unsigned int target_order;

 	file = getname_kernel(file_path);
 	if (IS_ERR(file))
@@ -3591,6 +3642,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 			file_path, off_start, off_end);

 	mapping = candidate->f_mapping;
+	min_order = mapping_min_folio_order(mapping);
+	target_order = max(new_order, min_order);

 	for (index = off_start; index < off_end; index += nr_pages) {
 		struct folio *folio = filemap_get_folio(mapping, index);
@@ -3605,15 +3658,19 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 		total++;
 		nr_pages = folio_nr_pages(folio);

-		if (new_order >= folio_order(folio))
+		if (target_order >= folio_order(folio))
 			goto next;

 		if (!folio_trylock(folio))
 			goto next;

-		if (!split_folio_to_order(folio, new_order))
+		if (folio->mapping != mapping)
+			goto unlock;
+
+		if (!split_folio_to_order(folio, target_order))
 			split++;

+unlock:
 		folio_unlock(folio);
 next:
 		folio_put(folio);
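Unlike the pid-based walker, split_huge_pages_in_file() computes min_order and target_order once, up front, since every folio it visits comes from the same f_mapping; the per-folio folio->mapping != mapping check then only guards against truncation races. A hedged userspace sketch of exercising this path through the debugfs file the function serves (the file path, offsets, and a standard debugfs mount point are all illustrative assumptions; the <path>,<off_start>,<off_end>[,<new_order>] token format is parsed by split_huge_pages_write()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Illustrative range; with this patch the effective split order
	 * is clamped to the file's mapping min-order, so on a
	 * large-block-size filesystem the request cannot undercut the
	 * block size.
	 */
	const char cmd[] = "/mnt/test/file,0x0,0x10000,0";
	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}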