Skip to content

Commit 5722bcd

Browse files
committed
Merge patch series "dropbehind fixes and cleanups"
Jens Axboe <[email protected]> says: As per the thread here: https://lore.kernel.org/linux-fsdevel/20250525083209.GS2023217@ZenIV/ there was an issue with the dropbehind support, and hence it got reverted (effectively) for the 6.15 kernel release. The problem stems from the fact that the folio can get redirtied and/or scheduled for writeback after the initial dropbehind test, and before we have it locked again for invalidation. Patches 1+2 add a generic helper that both the read and write side can use, and which checks for !dirty && !writeback before going ahead with the invalidation. Patch 3 reverts the FOP_DONTCACHE disable, and patches 4 and 5 do a bit of cleanup work to further unify how the read and write side handling works. This can reasonably be considered a 2 part series, as 1-3 fix the issue and could go to stable, while 4-5 just cleanup the code. * patches from https://lore.kernel.org/[email protected]: mm/filemap: unify dropbehind flag testing and clearing mm/filemap: unify read/write dropbehind naming Revert "Disable FOP_DONTCACHE for now due to bugs" mm/filemap: use filemap_end_dropbehind() for read invalidation mm/filemap: gate dropbehind invalidate on folio !dirty && !writeback Link: https://lore.kernel.org/[email protected] Signed-off-by: Christian Brauner <[email protected]>
2 parents 015a99f + a1d98e4 commit 5722bcd

File tree

2 files changed

+25
-16
lines changed

2 files changed

+25
-16
lines changed

include/linux/fs.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2207,7 +2207,7 @@ struct file_operations {
22072207
/* Supports asynchronous lock callbacks */
22082208
#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
22092209
/* File system supports uncached read/write buffered IO */
2210-
#define FOP_DONTCACHE 0 /* ((__force fop_flags_t)(1 << 7)) */
2210+
#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
22112211

22122212
/* Wrap a directory iterator that needs exclusive inode access */
22132213
int wrap_directory_iterator(struct file *, struct dir_context *,

mm/filemap.c

Lines changed: 24 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -1589,13 +1589,30 @@ int folio_wait_private_2_killable(struct folio *folio)
15891589
}
15901590
EXPORT_SYMBOL(folio_wait_private_2_killable);
15911591

1592+
static void filemap_end_dropbehind(struct folio *folio)
1593+
{
1594+
struct address_space *mapping = folio->mapping;
1595+
1596+
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1597+
1598+
if (folio_test_writeback(folio) || folio_test_dirty(folio))
1599+
return;
1600+
if (!folio_test_clear_dropbehind(folio))
1601+
return;
1602+
if (mapping)
1603+
folio_unmap_invalidate(mapping, folio, 0);
1604+
}
1605+
15921606
/*
15931607
* If folio was marked as dropbehind, then pages should be dropped when writeback
15941608
* completes. Do that now. If we fail, it's likely because of a big folio -
15951609
* just reset dropbehind for that case and latter completions should invalidate.
15961610
*/
1597-
static void folio_end_dropbehind_write(struct folio *folio)
1611+
static void filemap_end_dropbehind_write(struct folio *folio)
15981612
{
1613+
if (!folio_test_dropbehind(folio))
1614+
return;
1615+
15991616
/*
16001617
* Hitting !in_task() should not happen off RWF_DONTCACHE writeback,
16011618
* but can happen if normal writeback just happens to find dirty folios
@@ -1604,8 +1621,7 @@ static void folio_end_dropbehind_write(struct folio *folio)
16041621
* invalidation in that case.
16051622
*/
16061623
if (in_task() && folio_trylock(folio)) {
1607-
if (folio->mapping)
1608-
folio_unmap_invalidate(folio->mapping, folio, 0);
1624+
filemap_end_dropbehind(folio);
16091625
folio_unlock(folio);
16101626
}
16111627
}
@@ -1620,8 +1636,6 @@ static void folio_end_dropbehind_write(struct folio *folio)
16201636
*/
16211637
void folio_end_writeback(struct folio *folio)
16221638
{
1623-
bool folio_dropbehind = false;
1624-
16251639
VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
16261640

16271641
/*
@@ -1643,14 +1657,11 @@ void folio_end_writeback(struct folio *folio)
16431657
* reused before the folio_wake_bit().
16441658
*/
16451659
folio_get(folio);
1646-
if (!folio_test_dirty(folio))
1647-
folio_dropbehind = folio_test_clear_dropbehind(folio);
16481660
if (__folio_end_writeback(folio))
16491661
folio_wake_bit(folio, PG_writeback);
1650-
acct_reclaim_writeback(folio);
16511662

1652-
if (folio_dropbehind)
1653-
folio_end_dropbehind_write(folio);
1663+
filemap_end_dropbehind_write(folio);
1664+
acct_reclaim_writeback(folio);
16541665
folio_put(folio);
16551666
}
16561667
EXPORT_SYMBOL(folio_end_writeback);
@@ -2635,16 +2646,14 @@ static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
26352646
return (pos1 >> shift == pos2 >> shift);
26362647
}
26372648

2638-
static void filemap_end_dropbehind_read(struct address_space *mapping,
2639-
struct folio *folio)
2649+
static void filemap_end_dropbehind_read(struct folio *folio)
26402650
{
26412651
if (!folio_test_dropbehind(folio))
26422652
return;
26432653
if (folio_test_writeback(folio) || folio_test_dirty(folio))
26442654
return;
26452655
if (folio_trylock(folio)) {
2646-
if (folio_test_clear_dropbehind(folio))
2647-
folio_unmap_invalidate(mapping, folio, 0);
2656+
filemap_end_dropbehind(folio);
26482657
folio_unlock(folio);
26492658
}
26502659
}
@@ -2765,7 +2774,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
27652774
for (i = 0; i < folio_batch_count(&fbatch); i++) {
27662775
struct folio *folio = fbatch.folios[i];
27672776

2768-
filemap_end_dropbehind_read(mapping, folio);
2777+
filemap_end_dropbehind_read(folio);
27692778
folio_put(folio);
27702779
}
27712780
folio_batch_init(&fbatch);

0 commit comments

Comments (0)