
Commit b30a671

Matthew Wilcox (Oracle) authored and opsiff committed
mm: make __end_folio_writeback() return void
mainline inclusion from mainline-v6.7-rc1
category: performance

Rather than check the result of test-and-clear, just check that we have the writeback bit set at the start. This wouldn't catch every case, but it's good enough (and enables the next patch).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: Albert Ou <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Andreas Dilger <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: Paul Walmsley <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: "Theodore Ts'o" <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit 7d0795d)
Signed-off-by: Wentao Guan <[email protected]>
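The core of the change is an error-checking pattern: instead of checking the result of an atomic test-and-clear and treating an already-clear bit as fatal, the caller asserts up front that the bit is set. A rough sketch of that pattern follows; the flag word and helper names are invented for illustration and are not part of the patch:

#include <linux/bitops.h>
#include <linux/bug.h>

/* Old style: check the result of the test-and-clear, BUG() if the bit
 * was already clear.
 */
static void end_flag_checked(unsigned long *flags)
{
	if (!test_and_clear_bit(0, flags))
		BUG();
}

/* New style: sanity-check the bit on entry, then clear it without
 * looking at the return value of the test-and-clear.
 */
static void end_flag_asserted(unsigned long *flags)
{
	WARN_ON_ONCE(!test_bit(0, flags));
	test_and_clear_bit(0, flags);
}

In the diffs below, the entry assertion is VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio) in folio_end_writeback() (it only fires on CONFIG_DEBUG_VM kernels), and the test-and-clear whose result is no longer checked is folio_test_clear_writeback() in __folio_end_writeback().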
1 parent 1d8d227 commit b30a671

File tree (3 files changed: +24, -25 lines)

mm/filemap.c
mm/internal.h
mm/page-writeback.c

mm/filemap.c

Lines changed: 7 additions & 2 deletions
@@ -1604,9 +1604,15 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
 /**
  * folio_end_writeback - End writeback against a folio.
  * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
  */
 void folio_end_writeback(struct folio *folio)
 {
+	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
 	/*
 	 * folio_test_clear_reclaim() could be used here but it is an
 	 * atomic operation and overkill in this particular case. Failing
@@ -1626,8 +1632,7 @@ void folio_end_writeback(struct folio *folio)
 	 * reused before the folio_wake().
 	 */
 	folio_get(folio);
-	if (!__folio_end_writeback(folio))
-		BUG();
+	__folio_end_writeback(folio);
 
 	smp_mb__after_atomic();
 	folio_wake(folio, PG_writeback);
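To illustrate the contract documented above: folio_end_writeback() may run in process or interrupt context, but the folio must still be under writeback when it is called, because the function now opens with VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio). A hypothetical write-completion helper (my_end_writeback() is invented for this example, not part of the patch):

#include <linux/pagemap.h>

/* Hypothetical write-completion path, for illustration only. */
static void my_end_writeback(struct folio *folio, int err)
{
	if (err)
		mapping_set_error(folio->mapping, err);

	/*
	 * The folio was put under writeback by folio_start_writeback()
	 * and must not have been ended yet; on CONFIG_DEBUG_VM kernels an
	 * already-clean folio now trips the VM_BUG_ON_FOLIO() instead of
	 * the old unconditional BUG().
	 */
	folio_end_writeback(folio);
}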

mm/internal.h

Lines changed: 1 addition & 1 deletion
@@ -150,7 +150,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
-bool __folio_end_writeback(struct folio *folio);
+void __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);

mm/page-writeback.c

Lines changed: 16 additions & 22 deletions
@@ -2995,11 +2995,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
 	spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
-bool __folio_end_writeback(struct folio *folio)
+void __folio_end_writeback(struct folio *folio)
 {
 	long nr = folio_nr_pages(folio);
 	struct address_space *mapping = folio_mapping(folio);
-	bool ret;
 
 	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -3008,19 +3007,16 @@ bool __folio_end_writeback(struct folio *folio)
 		unsigned long flags;
 
 		xa_lock_irqsave(&mapping->i_pages, flags);
-		ret = folio_test_clear_writeback(folio);
-		if (ret) {
-			__xa_clear_mark(&mapping->i_pages, folio_index(folio),
-						PAGECACHE_TAG_WRITEBACK);
-			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-				struct bdi_writeback *wb = inode_to_wb(inode);
-
-				wb_stat_mod(wb, WB_WRITEBACK, -nr);
-				__wb_writeout_add(wb, nr);
-				if (!mapping_tagged(mapping,
-						    PAGECACHE_TAG_WRITEBACK))
-					wb_inode_writeback_end(wb);
-			}
+		folio_test_clear_writeback(folio);
+		__xa_clear_mark(&mapping->i_pages, folio_index(folio),
+					PAGECACHE_TAG_WRITEBACK);
+		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+			struct bdi_writeback *wb = inode_to_wb(inode);
+
+			wb_stat_mod(wb, WB_WRITEBACK, -nr);
+			__wb_writeout_add(wb, nr);
+			if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+				wb_inode_writeback_end(wb);
 		}
 
 		if (mapping->host && !mapping_tagged(mapping,
@@ -3029,15 +3025,13 @@ bool __folio_end_writeback(struct folio *folio)
 
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 	} else {
-		ret = folio_test_clear_writeback(folio);
-	}
-	if (ret) {
-		lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-		node_stat_mod_folio(folio, NR_WRITTEN, nr);
+		folio_test_clear_writeback(folio);
 	}
+
+	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+	node_stat_mod_folio(folio, NR_WRITTEN, nr);
 	folio_memcg_unlock(folio);
-	return ret;
 }
 
 bool __folio_start_writeback(struct folio *folio, bool keep_write)
