Commit 8479851

Matthew Wilcox (Oracle) authored and brauner committed
mm: Remove swap_writepage() and shmem_writepage()
Call swap_writeout() and shmem_writeout() from pageout() instead.

Signed-off-by: "Matthew Wilcox (Oracle)" <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Tested-by: Baolin Wang <[email protected]>
Reviewed-by: Baolin Wang <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
1 parent fe75adf commit 8479851
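
At its core, the series replaces reclaim's indirect call through mapping->a_ops->writepage with a direct, folio-based dispatch. A simplified sketch of the new control flow, condensed from the mm/vmscan.c hunk below (error handling and writeback_control setup omitted):

	int (*writeout)(struct folio *, struct writeback_control *);

	if (shmem_mapping(mapping))		/* tmpfs/shmem: write to swap */
		writeout = shmem_writeout;
	else if (folio_test_anon(folio))	/* anonymous memory: write to swap */
		writeout = swap_writeout;
	else
		return PAGE_ACTIVATE;		/* reclaim writes nothing else back */

	res = writeout(folio, &wbc);		/* folio-based, no struct page */

With the only remaining callers converted, the ->writepage entries can be dropped from shmem_aops and swap_aops, and both functions can take a folio directly.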

7 files changed: 26 additions, 37 deletions

block/blk-wbt.c

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
 enum wbt_flags {
 	WBT_TRACKED	= 1,	/* write, tracked for throttling */
 	WBT_READ	= 2,	/* read */
-	WBT_SWAP	= 4,	/* write, from swap_writepage() */
+	WBT_SWAP	= 4,	/* write, from swap_writeout() */
 	WBT_DISCARD	= 8,	/* discard */
 
 	WBT_NR_BITS	= 4,	/* number of bits */

mm/page_io.c

Lines changed: 1 addition & 2 deletions
@@ -237,9 +237,8 @@ static void swap_zeromap_folio_clear(struct folio *folio)
  * We may have stale swap cache pages in memory: notice
  * them here and get rid of the unnecessary final write.
  */
-int swap_writepage(struct page *page, struct writeback_control *wbc)
+int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-	struct folio *folio = page_folio(page);
 	int ret;
 
 	if (folio_free_swap(folio)) {
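
Moving the page_folio() conversion out of swap_writeout() means a caller that still holds only a struct page must convert at the call site, which is exactly what the now-removed shmem_writepage() wrapper below used to do. A hypothetical page-based caller (page and wbc are placeholders, not code from this commit):

	/* Hypothetical: convert to the containing folio, then write it out. */
	struct folio *folio = page_folio(page);
	int ret = swap_writeout(folio, wbc);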

mm/shmem.c

Lines changed: 5 additions & 18 deletions
@@ -98,7 +98,7 @@ static struct vfsmount *shm_mnt __ro_after_init;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * shmem_fallocate communicates with shmem_fault or shmem_writeout via
  * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
@@ -107,7 +107,7 @@ struct shmem_falloc {
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
-	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
+	pgoff_t nr_unswapped;	/* how often writeout refused to swap out */
 };
 
 struct shmem_options {
@@ -446,7 +446,7 @@ static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
 	/*
 	 * Special case: whereas normally shmem_recalc_inode() is called
 	 * after i_mapping->nrpages has already been adjusted (up or down),
-	 * shmem_writepage() has to raise swapped before nrpages is lowered -
+	 * shmem_writeout() has to raise swapped before nrpages is lowered -
 	 * to stop a racing shmem_recalc_inode() from thinking that a page has
 	 * been freed. Compensate here, to avoid the need for a followup call.
 	 */
@@ -1536,11 +1536,6 @@ int shmem_unuse(unsigned int type)
 	return error;
 }
 
-static int shmem_writepage(struct page *page, struct writeback_control *wbc)
-{
-	return shmem_writeout(page_folio(page), wbc);
-}
-
 /**
  * shmem_writeout - Write the folio to swap
  * @folio: The folio to write
@@ -1558,13 +1553,6 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 	int nr_pages;
 	bool split = false;
 
-	/*
-	 * Our capabilities prevent regular writeback or sync from ever calling
-	 * shmem_writepage; but a stacking filesystem might use ->writepage of
-	 * its underlying filesystem, in which case tmpfs should write out to
-	 * swap only in response to memory pressure, and not for the writeback
-	 * threads or sync.
-	 */
 	if (WARN_ON_ONCE(!wbc->for_reclaim))
 		goto redirty;
 
@@ -1653,7 +1641,7 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 
 			mutex_unlock(&shmem_swaplist_mutex);
 			BUG_ON(folio_mapped(folio));
-			return swap_writepage(&folio->page, wbc);
+			return swap_writeout(folio, wbc);
 		}
 
 		list_del_init(&info->swaplist);
@@ -3776,7 +3764,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 			index--;
 
 			/*
-			 * Inform shmem_writepage() how far we have reached.
+			 * Inform shmem_writeout() how far we have reached.
 			 * No need for lock or barrier: we have the page lock.
 			 */
 			if (!folio_test_uptodate(folio))
@@ -5199,7 +5187,6 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 }
 
 static const struct address_space_operations shmem_aops = {
-	.writepage	= shmem_writepage,
 	.dirty_folio	= noop_dirty_folio,
 #ifdef CONFIG_TMPFS
 	.write_begin	= shmem_write_begin,
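
With .writepage gone from shmem_aops, regular writeback and sync can no longer reach tmpfs at all (which is what the deleted comment block used to explain), so shmem_writeout() is only reachable from callers that build their own writeback_control. The retained WARN_ON_ONCE(!wbc->for_reclaim) spells out that contract; a minimal sketch of a conforming caller, with illustrative field values rather than the full set pageout() uses:

	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_NONE,	/* reclaim I/O is asynchronous */
		.for_reclaim = 1,		/* required, or shmem_writeout() warns */
	};

	int err = shmem_writeout(folio, &wbc);	/* try to write the folio to swap */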

mm/swap.h

Lines changed: 2 additions & 2 deletions
@@ -20,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
 		__swap_read_unplug(plug);
 }
 void swap_write_unplug(struct swap_iocb *sio);
-int swap_writepage(struct page *page, struct writeback_control *wbc);
+int swap_writeout(struct folio *folio, struct writeback_control *wbc);
 void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
 
 /* linux/mm/swap_state.c */

@@ -141,7 +141,7 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 	return NULL;
 }
 
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+static inline int swap_writeout(struct folio *f, struct writeback_control *wbc)
 {
 	return 0;
 }
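
The declaration and the !CONFIG_SWAP stub are renamed in lockstep, so pageout() compiles unconditionally and the swap branch folds away when swap is configured out. In outline, following mm/swap.h's existing #ifdef convention:

	#ifdef CONFIG_SWAP
	int swap_writeout(struct folio *folio, struct writeback_control *wbc);
	#else
	static inline int swap_writeout(struct folio *f, struct writeback_control *wbc)
	{
		return 0;	/* swap configured out: nothing to write */
	}
	#endif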

mm/swap_state.c

Lines changed: 0 additions & 1 deletion
@@ -30,7 +30,6 @@
  * vmscan's shrink_folio_list.
  */
 static const struct address_space_operations swap_aops = {
-	.writepage	= swap_writepage,
 	.dirty_folio	= noop_dirty_folio,
 #ifdef CONFIG_MIGRATION
 	.migrate_folio	= migrate_folio,

mm/swapfile.c

Lines changed: 1 addition & 1 deletion
@@ -2359,7 +2359,7 @@ static int try_to_unuse(unsigned int type)
 	 * Limit the number of retries? No: when mmget_not_zero()
 	 * above fails, that mm is likely to be freeing swap from
 	 * exit_mmap(), which proceeds at its own independent pace;
-	 * and even shmem_writepage() could have been preempted after
+	 * and even shmem_writeout() could have been preempted after
 	 * folio_alloc_swap(), temporarily hiding that swap. It's easy
 	 * and robust (though cpu-intensive) just to keep retrying.
 	 */

mm/vmscan.c

Lines changed: 16 additions & 12 deletions
@@ -653,16 +653,16 @@ typedef enum {
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 			 struct swap_iocb **plug, struct list_head *folio_list)
 {
+	int (*writeout)(struct folio *, struct writeback_control *);
+
 	/*
-	 * If the folio is dirty, only perform writeback if that write
-	 * will be non-blocking. To prevent this allocation from being
-	 * stalled by pagecache activity. But note that there may be
-	 * stalls if we need to run get_block(). We could test
-	 * PagePrivate for that.
-	 *
-	 * If this process is currently in __generic_file_write_iter() against
-	 * this folio's queue, we can perform writeback even if that
-	 * will block.
+	 * We no longer attempt to writeback filesystem folios here, other
+	 * than tmpfs/shmem. That's taken care of in page-writeback.
+	 * If we find a dirty filesystem folio at the end of the LRU list,
+	 * typically that means the filesystem is saturating the storage
+	 * with contiguous writes and telling it to write a folio here
+	 * would only make the situation worse by injecting an element
+	 * of random access.
 	 *
 	 * If the folio is swapcache, write it back even if that would
 	 * block, for some throttling. This happens by accident, because
@@ -685,7 +685,11 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		}
 		return PAGE_KEEP;
 	}
-	if (mapping->a_ops->writepage == NULL)
+	if (shmem_mapping(mapping))
+		writeout = shmem_writeout;
+	else if (folio_test_anon(folio))
+		writeout = swap_writeout;
+	else
 		return PAGE_ACTIVATE;
 
 	if (folio_clear_dirty_for_io(folio)) {
@@ -708,7 +712,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		wbc.list = folio_list;
 
 		folio_set_reclaim(folio);
-		res = mapping->a_ops->writepage(&folio->page, &wbc);
+		res = writeout(folio, &wbc);
 		if (res < 0)
 			handle_write_error(mapping, folio, res);
 		if (res == AOP_WRITEPAGE_ACTIVATE) {
@@ -717,7 +721,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 	}
 
 	if (!folio_test_writeback(folio)) {
-		/* synchronous write or broken a_ops? */
+		/* synchronous write? */
 		folio_clear_reclaim(folio);
 	}
 	trace_mm_vmscan_write_folio(folio);
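
For context, the writeback_control handed to the chosen writeout function is initialized a few lines above the hunks shown. Roughly, paraphrasing pageout() in mm/vmscan.c (the exact field set may differ between kernel versions):

	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,		/* never wait on writeback here */
		.nr_to_write	= SWAP_CLUSTER_MAX,	/* bound the work per call */
		.range_start	= 0,
		.range_end	= LLONG_MAX,
		.for_reclaim	= 1,			/* shmem_writeout() insists on this */
		.swap_plug	= plug,			/* batch swap bio submission */
	};

As the hunks above show, pageout() still checks for AOP_WRITEPAGE_ACTIVATE, so the return convention of the two writeout functions is unchanged; only the calling convention (folio instead of page, direct call instead of a_ops) differs.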
