@@ -2982,67 +2982,63 @@ bool __folio_end_writeback(struct folio *folio)
 	return ret;
 }
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
 {
 	long nr = folio_nr_pages(folio);
 	struct address_space *mapping = folio_mapping(folio);
-	bool ret;
 	int access_ret;
 
+	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+
 	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
+		bool on_wblist;
 
 		xas_lock_irqsave(&xas, flags);
 		xas_load(&xas);
-		ret = folio_test_set_writeback(folio);
-		if (!ret) {
-			bool on_wblist;
+		folio_test_set_writeback(folio);
 
-			on_wblist = mapping_tagged(mapping,
-						   PAGECACHE_TAG_WRITEBACK);
+		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
 
-			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
-			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-				struct bdi_writeback *wb = inode_to_wb(inode);
-
-				wb_stat_mod(wb, WB_WRITEBACK, nr);
-				if (!on_wblist)
-					wb_inode_writeback_start(wb);
-			}
+		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+			struct bdi_writeback *wb = inode_to_wb(inode);
 
-			/*
-			 * We can come through here when swapping
-			 * anonymous folios, so we don't necessarily
-			 * have an inode to track for sync.
-			 */
-			if (mapping->host && !on_wblist)
-				sb_mark_inode_writeback(mapping->host);
+			wb_stat_mod(wb, WB_WRITEBACK, nr);
+			if (!on_wblist)
+				wb_inode_writeback_start(wb);
 		}
+
+		/*
+		 * We can come through here when swapping anonymous
+		 * folios, so we don't necessarily have an inode to
+		 * track for sync.
+		 */
+		if (mapping->host && !on_wblist)
+			sb_mark_inode_writeback(mapping->host);
 		if (!folio_test_dirty(folio))
 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		if (!keep_write)
 			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
 		xas_unlock_irqrestore(&xas, flags);
 	} else {
-		ret = folio_test_set_writeback(folio);
-	}
-	if (!ret) {
-		lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+		folio_test_set_writeback(folio);
 	}
+
+	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
 	folio_memcg_unlock(folio);
+
 	access_ret = arch_make_folio_accessible(folio);
 	/*
	 * If writeback has been triggered on a page that cannot be made
 	 * accessible, it is too late to recover here.
 	 */
 	VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
-	return ret;
 }
 EXPORT_SYMBOL(__folio_start_writeback);
 
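Editor's note on this hunk: the change to __folio_start_writeback() is not just cosmetic. It used to return whether the folio was already under writeback; it now returns void and instead asserts, via the new VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio), that it is not. That invariant is also what lets the NR_WRITEBACK/NR_ZONE_WRITE_PENDING accounting move out of the old "if (!ret)" branch and run unconditionally. Below is a minimal sketch of a caller under the new contract; my_fs_start_folio_writeback is a hypothetical helper for illustration, not code from this commit.

#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Hypothetical caller (illustration only).  Under the new contract the
 * caller must guarantee that writeback is not already in flight,
 * typically by holding the folio lock, rather than checking a return
 * value from __folio_start_writeback().
 */
static void my_fs_start_folio_writeback(struct folio *folio)
{
	/* The folio lock excludes any concurrent writeback starter. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/* keep_write == false also clears the TOWRITE xarray tag. */
	__folio_start_writeback(folio, false);
}

Any caller that previously branched on the bool result would be converted the same way: establish the "not already under writeback" precondition first, then call unconditionally.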