@@ -978,15 +978,15 @@ void drop_slab(void)
978
978
drop_slab_node (nid );
979
979
}
980
980
981
/*
 * Return non-zero when the caller holds the only reference keeping this
 * page-cache folio alive (besides the page cache itself and any
 * filesystem-private data), i.e. the folio is eligible for reclaim.
 */
static inline int is_page_cache_freeable(struct folio *folio)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache and optional buffer
	 * heads at page->private.
	 *
	 * folio_test_private() contributes 0 or 1 here, accounting for
	 * the single reference held via the private data; the page cache
	 * itself holds one reference per page, hence folio_nr_pages().
	 */
	return folio_ref_count(folio) - folio_test_private(folio) ==
		1 + folio_nr_pages(folio);
}
991
991
992
992
static int may_write_to_inode (struct inode * inode )
@@ -1001,24 +1001,24 @@ static int may_write_to_inode(struct inode *inode)
1001
1001
}
1002
1002
1003
1003
/*
 * We detected a synchronous write error writing a folio out. Probably
 * -ENOSPC. We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the folio and once
 * that folio is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping folio_lock() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct folio *folio, int error)
{
	folio_lock(folio);
	/*
	 * Only record the error if the folio still belongs to this
	 * mapping — it may have been truncated/migrated meanwhile.
	 */
	if (folio_mapping(folio) == mapping)
		mapping_set_error(mapping, error);
	folio_unlock(folio);
}
1023
1023
1024
1024
static bool skip_throttle_noprogress (pg_data_t * pgdat )
@@ -1165,35 +1165,35 @@ typedef enum {
1165
1165
* pageout is called by shrink_page_list() for each dirty page.
1166
1166
* Calls ->writepage().
1167
1167
*/
1168
- static pageout_t pageout (struct page * page , struct address_space * mapping )
1168
+ static pageout_t pageout (struct folio * folio , struct address_space * mapping )
1169
1169
{
1170
1170
/*
1171
- * If the page is dirty, only perform writeback if that write
1171
+ * If the folio is dirty, only perform writeback if that write
1172
1172
* will be non-blocking. To prevent this allocation from being
1173
1173
* stalled by pagecache activity. But note that there may be
1174
1174
* stalls if we need to run get_block(). We could test
1175
1175
* PagePrivate for that.
1176
1176
*
1177
1177
* If this process is currently in __generic_file_write_iter() against
1178
- * this page 's queue, we can perform writeback even if that
1178
+ * this folio 's queue, we can perform writeback even if that
1179
1179
* will block.
1180
1180
*
1181
- * If the page is swapcache, write it back even if that would
1181
+ * If the folio is swapcache, write it back even if that would
1182
1182
* block, for some throttling. This happens by accident, because
1183
1183
* swap_backing_dev_info is bust: it doesn't reflect the
1184
1184
* congestion state of the swapdevs. Easy to fix, if needed.
1185
1185
*/
1186
- if (!is_page_cache_freeable (page ))
1186
+ if (!is_page_cache_freeable (folio ))
1187
1187
return PAGE_KEEP ;
1188
1188
if (!mapping ) {
1189
1189
/*
1190
- * Some data journaling orphaned pages can have
1191
- * page ->mapping == NULL while being dirty with clean buffers.
1190
+ * Some data journaling orphaned folios can have
1191
+ * folio ->mapping == NULL while being dirty with clean buffers.
1192
1192
*/
1193
- if (page_has_private ( page )) {
1194
- if (try_to_free_buffers (page )) {
1195
- ClearPageDirty ( page );
1196
- pr_info ("%s: orphaned page \n" , __func__ );
1193
+ if (folio_test_private ( folio )) {
1194
+ if (try_to_free_buffers (& folio -> page )) {
1195
+ folio_clear_dirty ( folio );
1196
+ pr_info ("%s: orphaned folio \n" , __func__ );
1197
1197
return PAGE_CLEAN ;
1198
1198
}
1199
1199
}
@@ -1204,7 +1204,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
1204
1204
if (!may_write_to_inode (mapping -> host ))
1205
1205
return PAGE_KEEP ;
1206
1206
1207
- if (clear_page_dirty_for_io ( page )) {
1207
+ if (folio_clear_dirty_for_io ( folio )) {
1208
1208
int res ;
1209
1209
struct writeback_control wbc = {
1210
1210
.sync_mode = WB_SYNC_NONE ,
@@ -1214,21 +1214,21 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
1214
1214
.for_reclaim = 1 ,
1215
1215
};
1216
1216
1217
- SetPageReclaim ( page );
1218
- res = mapping -> a_ops -> writepage (page , & wbc );
1217
+ folio_set_reclaim ( folio );
1218
+ res = mapping -> a_ops -> writepage (& folio -> page , & wbc );
1219
1219
if (res < 0 )
1220
- handle_write_error (mapping , page , res );
1220
+ handle_write_error (mapping , folio , res );
1221
1221
if (res == AOP_WRITEPAGE_ACTIVATE ) {
1222
- ClearPageReclaim ( page );
1222
+ folio_clear_reclaim ( folio );
1223
1223
return PAGE_ACTIVATE ;
1224
1224
}
1225
1225
1226
- if (!PageWriteback ( page )) {
1226
+ if (!folio_test_writeback ( folio )) {
1227
1227
/* synchronous write or broken a_ops? */
1228
- ClearPageReclaim ( page );
1228
+ folio_clear_reclaim ( folio );
1229
1229
}
1230
- trace_mm_vmscan_writepage ( page );
1231
- inc_node_page_state ( page , NR_VMSCAN_WRITE );
1230
+ trace_mm_vmscan_write_folio ( folio );
1231
+ node_stat_add_folio ( folio , NR_VMSCAN_WRITE );
1232
1232
return PAGE_SUCCESS ;
1233
1233
}
1234
1234
@@ -1816,7 +1816,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
1816
1816
* starts and then write it out here.
1817
1817
*/
1818
1818
try_to_unmap_flush_dirty ();
1819
- switch (pageout (page , mapping )) {
1819
+ switch (pageout (folio , mapping )) {
1820
1820
case PAGE_KEEP :
1821
1821
goto keep_locked ;
1822
1822
case PAGE_ACTIVATE :
0 commit comments