Skip to content

Commit e0cd5e7

Browse files
Author: Matthew Wilcox (Oracle)
Committed
mm/vmscan: Convert pageout() to take a folio
We always write out an entire folio at once. This conversion removes a few calls to compound_head() and gets the NR_VMSCAN_WRITE statistic right when writing out a large folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent d92013d commit e0cd5e7

File tree

2 files changed

+37
-37
lines changed

2 files changed

+37
-37
lines changed

include/trace/events/vmscan.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -327,21 +327,21 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
327327
__print_symbolic(__entry->lru, LRU_NAMES))
328328
);
329329

330-
TRACE_EVENT(mm_vmscan_writepage,
330+
TRACE_EVENT(mm_vmscan_write_folio,
331331

332-
TP_PROTO(struct page *page),
332+
TP_PROTO(struct folio *folio),
333333

334-
TP_ARGS(page),
334+
TP_ARGS(folio),
335335

336336
TP_STRUCT__entry(
337337
__field(unsigned long, pfn)
338338
__field(int, reclaim_flags)
339339
),
340340

341341
TP_fast_assign(
342-
__entry->pfn = page_to_pfn(page);
342+
__entry->pfn = folio_pfn(folio);
343343
__entry->reclaim_flags = trace_reclaim_flags(
344-
page_is_file_lru(page));
344+
folio_is_file_lru(folio));
345345
),
346346

347347
TP_printk("page=%p pfn=0x%lx flags=%s",

mm/vmscan.c

Lines changed: 32 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -978,15 +978,15 @@ void drop_slab(void)
978978
drop_slab_node(nid);
979979
}
980980

981-
static inline int is_page_cache_freeable(struct page *page)
981+
static inline int is_page_cache_freeable(struct folio *folio)
982982
{
983983
/*
984984
* A freeable page cache page is referenced only by the caller
985985
* that isolated the page, the page cache and optional buffer
986986
* heads at page->private.
987987
*/
988-
int page_cache_pins = thp_nr_pages(page);
989-
return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
988+
return folio_ref_count(folio) - folio_test_private(folio) ==
989+
1 + folio_nr_pages(folio);
990990
}
991991

992992
static int may_write_to_inode(struct inode *inode)
@@ -1001,24 +1001,24 @@ static int may_write_to_inode(struct inode *inode)
10011001
}
10021002

10031003
/*
1004-
* We detected a synchronous write error writing a page out. Probably
1004+
* We detected a synchronous write error writing a folio out. Probably
10051005
* -ENOSPC. We need to propagate that into the address_space for a subsequent
10061006
* fsync(), msync() or close().
10071007
*
10081008
* The tricky part is that after writepage we cannot touch the mapping: nothing
1009-
* prevents it from being freed up. But we have a ref on the page and once
1010-
* that page is locked, the mapping is pinned.
1009+
* prevents it from being freed up. But we have a ref on the folio and once
1010+
* that folio is locked, the mapping is pinned.
10111011
*
1012-
* We're allowed to run sleeping lock_page() here because we know the caller has
1012+
* We're allowed to run sleeping folio_lock() here because we know the caller has
10131013
* __GFP_FS.
10141014
*/
10151015
static void handle_write_error(struct address_space *mapping,
1016-
struct page *page, int error)
1016+
struct folio *folio, int error)
10171017
{
1018-
lock_page(page);
1019-
if (page_mapping(page) == mapping)
1018+
folio_lock(folio);
1019+
if (folio_mapping(folio) == mapping)
10201020
mapping_set_error(mapping, error);
1021-
unlock_page(page);
1021+
folio_unlock(folio);
10221022
}
10231023

10241024
static bool skip_throttle_noprogress(pg_data_t *pgdat)
@@ -1165,35 +1165,35 @@ typedef enum {
11651165
* pageout is called by shrink_page_list() for each dirty page.
11661166
* Calls ->writepage().
11671167
*/
1168-
static pageout_t pageout(struct page *page, struct address_space *mapping)
1168+
static pageout_t pageout(struct folio *folio, struct address_space *mapping)
11691169
{
11701170
/*
1171-
* If the page is dirty, only perform writeback if that write
1171+
* If the folio is dirty, only perform writeback if that write
11721172
* will be non-blocking. To prevent this allocation from being
11731173
* stalled by pagecache activity. But note that there may be
11741174
* stalls if we need to run get_block(). We could test
11751175
* PagePrivate for that.
11761176
*
11771177
* If this process is currently in __generic_file_write_iter() against
1178-
* this page's queue, we can perform writeback even if that
1178+
* this folio's queue, we can perform writeback even if that
11791179
* will block.
11801180
*
1181-
* If the page is swapcache, write it back even if that would
1181+
* If the folio is swapcache, write it back even if that would
11821182
* block, for some throttling. This happens by accident, because
11831183
* swap_backing_dev_info is bust: it doesn't reflect the
11841184
* congestion state of the swapdevs. Easy to fix, if needed.
11851185
*/
1186-
if (!is_page_cache_freeable(page))
1186+
if (!is_page_cache_freeable(folio))
11871187
return PAGE_KEEP;
11881188
if (!mapping) {
11891189
/*
1190-
* Some data journaling orphaned pages can have
1191-
* page->mapping == NULL while being dirty with clean buffers.
1190+
* Some data journaling orphaned folios can have
1191+
* folio->mapping == NULL while being dirty with clean buffers.
11921192
*/
1193-
if (page_has_private(page)) {
1194-
if (try_to_free_buffers(page)) {
1195-
ClearPageDirty(page);
1196-
pr_info("%s: orphaned page\n", __func__);
1193+
if (folio_test_private(folio)) {
1194+
if (try_to_free_buffers(&folio->page)) {
1195+
folio_clear_dirty(folio);
1196+
pr_info("%s: orphaned folio\n", __func__);
11971197
return PAGE_CLEAN;
11981198
}
11991199
}
@@ -1204,7 +1204,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
12041204
if (!may_write_to_inode(mapping->host))
12051205
return PAGE_KEEP;
12061206

1207-
if (clear_page_dirty_for_io(page)) {
1207+
if (folio_clear_dirty_for_io(folio)) {
12081208
int res;
12091209
struct writeback_control wbc = {
12101210
.sync_mode = WB_SYNC_NONE,
@@ -1214,21 +1214,21 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
12141214
.for_reclaim = 1,
12151215
};
12161216

1217-
SetPageReclaim(page);
1218-
res = mapping->a_ops->writepage(page, &wbc);
1217+
folio_set_reclaim(folio);
1218+
res = mapping->a_ops->writepage(&folio->page, &wbc);
12191219
if (res < 0)
1220-
handle_write_error(mapping, page, res);
1220+
handle_write_error(mapping, folio, res);
12211221
if (res == AOP_WRITEPAGE_ACTIVATE) {
1222-
ClearPageReclaim(page);
1222+
folio_clear_reclaim(folio);
12231223
return PAGE_ACTIVATE;
12241224
}
12251225

1226-
if (!PageWriteback(page)) {
1226+
if (!folio_test_writeback(folio)) {
12271227
/* synchronous write or broken a_ops? */
1228-
ClearPageReclaim(page);
1228+
folio_clear_reclaim(folio);
12291229
}
1230-
trace_mm_vmscan_writepage(page);
1231-
inc_node_page_state(page, NR_VMSCAN_WRITE);
1230+
trace_mm_vmscan_write_folio(folio);
1231+
node_stat_add_folio(folio, NR_VMSCAN_WRITE);
12321232
return PAGE_SUCCESS;
12331233
}
12341234

@@ -1816,7 +1816,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
18161816
* starts and then write it out here.
18171817
*/
18181818
try_to_unmap_flush_dirty();
1819-
switch (pageout(page, mapping)) {
1819+
switch (pageout(folio, mapping)) {
18201820
case PAGE_KEEP:
18211821
goto keep_locked;
18221822
case PAGE_ACTIVATE:

0 commit comments

Comments
 (0)