Skip to content

Commit eebb3da

Browse files
Baolin Wangakpm00
authored andcommitted
mm: migrate: record the mlocked page status to remove unnecessary lru drain
When doing compaction, I found the lru_add_drain() is an obvious hotspot when migrating pages. The distribution of this hotspot is as follows: - 18.75% compact_zone - 17.39% migrate_pages - 13.79% migrate_pages_batch - 11.66% migrate_folio_move - 7.02% lru_add_drain + 7.02% lru_add_drain_cpu + 3.00% move_to_new_folio 1.23% rmap_walk + 1.92% migrate_folio_unmap + 3.20% migrate_pages_sync + 0.90% isolate_migratepages The lru_add_drain() was added by commit c3096e6 ("mm/migrate: __unmap_and_move() push good newpage to LRU") to drain the newpage to LRU immediately, to help to build up the correct newpage->mlock_count in remove_migration_ptes() for mlocked pages. However, if no mlocked pages are migrating, then we can avoid this lru drain operation, especially for the heavy concurrent scenarios. So we can record the source pages' mlocked status in migrate_folio_unmap(), and only drain the lru list when the mlocked status is set in migrate_folio_move(). In addition, the page was already isolated from lru when migrating, so checking the mlocked status is stable by folio_test_mlocked() in migrate_folio_unmap(). 
After this patch, I can see the hotspot of the lru_add_drain() is gone: - 9.41% migrate_pages_batch - 6.15% migrate_folio_move - 3.64% move_to_new_folio + 1.80% migrate_folio_extra + 1.70% buffer_migrate_folio + 1.41% rmap_walk + 0.62% folio_add_lru + 3.07% migrate_folio_unmap Meanwhile, the compaction latency shows some improvements when running thpscale: base patched Amean fault-both-1 1131.22 ( 0.00%) 1112.55 * 1.65%* Amean fault-both-3 2489.75 ( 0.00%) 2324.15 * 6.65%* Amean fault-both-5 3257.37 ( 0.00%) 3183.18 * 2.28%* Amean fault-both-7 4257.99 ( 0.00%) 4079.04 * 4.20%* Amean fault-both-12 6614.02 ( 0.00%) 6075.60 * 8.14%* Amean fault-both-18 10607.78 ( 0.00%) 8978.86 * 15.36%* Amean fault-both-24 14911.65 ( 0.00%) 11619.55 * 22.08%* Amean fault-both-30 14954.67 ( 0.00%) 14925.66 * 0.19%* Amean fault-both-32 16654.87 ( 0.00%) 15580.31 * 6.45%* Link: https://lkml.kernel.org/r/06e9153a7a4850352ec36602df3a3a844de45698.1697859741.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang <[email protected]> Reviewed-by: "Huang, Ying" <[email protected]> Reviewed-by: Zi Yan <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Yin Fengwei <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent e5b16c8 commit eebb3da

File tree

1 file changed

+29
-19
lines changed

1 file changed

+29
-19
lines changed

mm/migrate.c

Lines changed: 29 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1027,22 +1027,28 @@ union migration_ptr {
10271027
struct anon_vma *anon_vma;
10281028
struct address_space *mapping;
10291029
};
1030+
1031+
enum {
1032+
PAGE_WAS_MAPPED = BIT(0),
1033+
PAGE_WAS_MLOCKED = BIT(1),
1034+
};
1035+
10301036
static void __migrate_folio_record(struct folio *dst,
1031-
unsigned long page_was_mapped,
1037+
unsigned long old_page_state,
10321038
struct anon_vma *anon_vma)
10331039
{
10341040
union migration_ptr ptr = { .anon_vma = anon_vma };
10351041
dst->mapping = ptr.mapping;
1036-
dst->private = (void *)page_was_mapped;
1042+
dst->private = (void *)old_page_state;
10371043
}
10381044

10391045
static void __migrate_folio_extract(struct folio *dst,
1040-
int *page_was_mappedp,
1046+
int *old_page_state,
10411047
struct anon_vma **anon_vmap)
10421048
{
10431049
union migration_ptr ptr = { .mapping = dst->mapping };
10441050
*anon_vmap = ptr.anon_vma;
1045-
*page_was_mappedp = (unsigned long)dst->private;
1051+
*old_page_state = (unsigned long)dst->private;
10461052
dst->mapping = NULL;
10471053
dst->private = NULL;
10481054
}
@@ -1103,7 +1109,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
11031109
{
11041110
struct folio *dst;
11051111
int rc = -EAGAIN;
1106-
int page_was_mapped = 0;
1112+
int old_page_state = 0;
11071113
struct anon_vma *anon_vma = NULL;
11081114
bool is_lru = !__folio_test_movable(src);
11091115
bool locked = false;
@@ -1157,6 +1163,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
11571163
folio_lock(src);
11581164
}
11591165
locked = true;
1166+
if (folio_test_mlocked(src))
1167+
old_page_state |= PAGE_WAS_MLOCKED;
11601168

11611169
if (folio_test_writeback(src)) {
11621170
/*
@@ -1206,7 +1214,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
12061214
dst_locked = true;
12071215

12081216
if (unlikely(!is_lru)) {
1209-
__migrate_folio_record(dst, page_was_mapped, anon_vma);
1217+
__migrate_folio_record(dst, old_page_state, anon_vma);
12101218
return MIGRATEPAGE_UNMAP;
12111219
}
12121220

@@ -1232,11 +1240,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
12321240
VM_BUG_ON_FOLIO(folio_test_anon(src) &&
12331241
!folio_test_ksm(src) && !anon_vma, src);
12341242
try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1235-
page_was_mapped = 1;
1243+
old_page_state |= PAGE_WAS_MAPPED;
12361244
}
12371245

12381246
if (!folio_mapped(src)) {
1239-
__migrate_folio_record(dst, page_was_mapped, anon_vma);
1247+
__migrate_folio_record(dst, old_page_state, anon_vma);
12401248
return MIGRATEPAGE_UNMAP;
12411249
}
12421250

@@ -1248,7 +1256,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
12481256
if (rc == -EAGAIN)
12491257
ret = NULL;
12501258

1251-
migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
1259+
migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1260+
anon_vma, locked, ret);
12521261
migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
12531262

12541263
return rc;
@@ -1261,12 +1270,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
12611270
struct list_head *ret)
12621271
{
12631272
int rc;
1264-
int page_was_mapped = 0;
1273+
int old_page_state = 0;
12651274
struct anon_vma *anon_vma = NULL;
12661275
bool is_lru = !__folio_test_movable(src);
12671276
struct list_head *prev;
12681277

1269-
__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1278+
__migrate_folio_extract(dst, &old_page_state, &anon_vma);
12701279
prev = dst->lru.prev;
12711280
list_del(&dst->lru);
12721281

@@ -1287,10 +1296,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
12871296
* isolated from the unevictable LRU: but this case is the easiest.
12881297
*/
12891298
folio_add_lru(dst);
1290-
if (page_was_mapped)
1299+
if (old_page_state & PAGE_WAS_MLOCKED)
12911300
lru_add_drain();
12921301

1293-
if (page_was_mapped)
1302+
if (old_page_state & PAGE_WAS_MAPPED)
12941303
remove_migration_ptes(src, dst, false);
12951304

12961305
out_unlock_both:
@@ -1322,11 +1331,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
13221331
*/
13231332
if (rc == -EAGAIN) {
13241333
list_add(&dst->lru, prev);
1325-
__migrate_folio_record(dst, page_was_mapped, anon_vma);
1334+
__migrate_folio_record(dst, old_page_state, anon_vma);
13261335
return rc;
13271336
}
13281337

1329-
migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
1338+
migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1339+
anon_vma, true, ret);
13301340
migrate_folio_undo_dst(dst, true, put_new_folio, private);
13311341

13321342
return rc;
@@ -1799,12 +1809,12 @@ static int migrate_pages_batch(struct list_head *from,
17991809
dst = list_first_entry(&dst_folios, struct folio, lru);
18001810
dst2 = list_next_entry(dst, lru);
18011811
list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1802-
int page_was_mapped = 0;
1812+
int old_page_state = 0;
18031813
struct anon_vma *anon_vma = NULL;
18041814

1805-
__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1806-
migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
1807-
true, ret_folios);
1815+
__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1816+
migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1817+
anon_vma, true, ret_folios);
18081818
list_del(&dst->lru);
18091819
migrate_folio_undo_dst(dst, true, put_new_folio, private);
18101820
dst = dst2;

0 commit comments

Comments
 (0)