@@ -3577,7 +3577,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
 			if (folio_test_partially_mapped(folio)) {
-				__folio_clear_partially_mapped(folio);
+				folio_clear_partially_mapped(folio);
 				mod_mthp_stat(folio_order(folio),
 					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 			}
@@ -3689,7 +3689,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
 		if (folio_test_partially_mapped(folio)) {
-			__folio_clear_partially_mapped(folio);
+			folio_clear_partially_mapped(folio);
 			mod_mthp_stat(folio_order(folio),
 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 		}
@@ -3733,7 +3733,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (partially_mapped) {
 		if (!folio_test_partially_mapped(folio)) {
-			__folio_set_partially_mapped(folio);
+			folio_set_partially_mapped(folio);
 			if (folio_test_pmd_mappable(folio))
 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
@@ -3826,7 +3826,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 		} else {
 			/* We lost race with folio_put() */
 			if (folio_test_partially_mapped(folio)) {
-				__folio_clear_partially_mapped(folio);
+				folio_clear_partially_mapped(folio);
 				mod_mthp_stat(folio_order(folio),
 					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 			}
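
The functional change in every hunk above is the same: the non-atomic __folio_set_partially_mapped() / __folio_clear_partially_mapped() helpers are replaced by their unprefixed counterparts. By the kernel's page-flag naming convention, the double-underscore helpers are plain (non-atomic) bit operations that assume exclusive access to the flags word, while the unprefixed ones use atomic bit ops and so cannot lose a concurrent update to another flag stored in the same word. The stand-alone user-space sketch below (not kernel code; PG_A, PG_B, the thread functions, and the loop counts are invented for illustration) shows the lost-update hazard that the atomic variants avoid:

/*
 * Illustration only: thread A does a plain read-modify-write on the shared
 * "flags" word (analogous to a __folio_set_*() helper); thread B sets a
 * *different* bit atomically.  Thread A's write-back can overwrite thread
 * B's bit, so PG_B may end up clear even though nothing ever cleared it.
 */
#include <pthread.h>
#include <stdio.h>

#define PG_A	0
#define PG_B	1

static unsigned long flags;

static void *toggle_a_nonatomic(void *arg)
{
	for (long i = 0; i < 10000000; i++) {
		flags |= (1UL << PG_A);		/* plain RMW, like __set_bit() */
		flags &= ~(1UL << PG_A);	/* plain RMW, like __clear_bit() */
	}
	return NULL;
}

static void *set_b_atomic(void *arg)
{
	for (long i = 0; i < 10000000; i++)
		__atomic_fetch_or(&flags, 1UL << PG_B, __ATOMIC_RELAXED);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, toggle_a_nonatomic, NULL);
	pthread_create(&b, NULL, set_b_atomic, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With the non-atomic RMW in thread A, PG_B can be lost. */
	printf("PG_B still set? %s\n",
	       (flags & (1UL << PG_B)) ? "yes" : "no (lost update)");
	return 0;
}

Build with something like cc -O2 -pthread race.c. If thread A used __atomic_fetch_or()/__atomic_fetch_and() instead, PG_B could never be lost; that is the property the switch to folio_set/clear_partially_mapped() provides in these deferred-split paths, where other flags sharing the folio's flags word may be updated concurrently.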