Skip to content

Commit 602f09f

Browse files
zhangyi089 authored and brauner committed
iomap: don't increase i_size in iomap_write_end()
This reverts commit '0841ea4a3b41 ("iomap: keep on increasing i_size in iomap_write_end()")'. After xfs could zero out the tail blocks aligned to the allocation unit size and convert the tail blocks to unwritten for realtime inode on truncate down, it couldn't expose any stale data when unaligned truncate down realtime inodes, so we could keep on keeping i_size for IOMAP_UNSHARE and IOMAP_ZERO in iomap_write_end(). Signed-off-by: Zhang Yi <[email protected]> Link: https://lore.kernel.org/r/[email protected] Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Darrick J. Wong <[email protected]> Signed-off-by: Christian Brauner <[email protected]>
1 parent d048945 commit 602f09f

File tree

1 file changed

+29
-24
lines changed

1 file changed

+29
-24
lines changed

fs/iomap/buffered-io.c

Lines changed: 29 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -896,37 +896,22 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
896896
size_t copied, struct folio *folio)
897897
{
898898
const struct iomap *srcmap = iomap_iter_srcmap(iter);
899-
loff_t old_size = iter->inode->i_size;
900-
size_t written;
901899

902900
if (srcmap->type == IOMAP_INLINE) {
903901
iomap_write_end_inline(iter, folio, pos, copied);
904-
written = copied;
905-
} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
906-
written = block_write_end(NULL, iter->inode->i_mapping, pos,
907-
len, copied, &folio->page, NULL);
908-
WARN_ON_ONCE(written != copied && written != 0);
909-
} else {
910-
written = __iomap_write_end(iter->inode, pos, len, copied,
911-
folio) ? copied : 0;
902+
return true;
912903
}
913904

914-
/*
915-
* Update the in-memory inode size after copying the data into the page
916-
* cache. It's up to the file system to write the updated size to disk,
917-
* preferably after I/O completion so that no stale data is exposed.
918-
* Only once that's done can we unlock and release the folio.
919-
*/
920-
if (pos + written > old_size) {
921-
i_size_write(iter->inode, pos + written);
922-
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
923-
}
924-
__iomap_put_folio(iter, pos, written, folio);
905+
if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
906+
size_t bh_written;
925907

926-
if (old_size < pos)
927-
pagecache_isize_extended(iter->inode, old_size, pos);
908+
bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
909+
len, copied, &folio->page, NULL);
910+
WARN_ON_ONCE(bh_written != copied && bh_written != 0);
911+
return bh_written == copied;
912+
}
928913

929-
return written == copied;
914+
return __iomap_write_end(iter->inode, pos, len, copied, folio);
930915
}
931916

932917
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
@@ -941,6 +926,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
941926

942927
do {
943928
struct folio *folio;
929+
loff_t old_size;
944930
size_t offset; /* Offset into folio */
945931
size_t bytes; /* Bytes to write to folio */
946932
size_t copied; /* Bytes copied from user */
@@ -992,6 +978,23 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
992978
written = iomap_write_end(iter, pos, bytes, copied, folio) ?
993979
copied : 0;
994980

981+
/*
982+
* Update the in-memory inode size after copying the data into
983+
* the page cache. It's up to the file system to write the
984+
* updated size to disk, preferably after I/O completion so that
985+
* no stale data is exposed. Only once that's done can we
986+
* unlock and release the folio.
987+
*/
988+
old_size = iter->inode->i_size;
989+
if (pos + written > old_size) {
990+
i_size_write(iter->inode, pos + written);
991+
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
992+
}
993+
__iomap_put_folio(iter, pos, written, folio);
994+
995+
if (old_size < pos)
996+
pagecache_isize_extended(iter->inode, old_size, pos);
997+
995998
cond_resched();
996999
if (unlikely(written == 0)) {
9971000
/*
@@ -1362,6 +1365,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
13621365
bytes = folio_size(folio) - offset;
13631366

13641367
ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1368+
__iomap_put_folio(iter, pos, bytes, folio);
13651369
if (WARN_ON_ONCE(!ret))
13661370
return -EIO;
13671371

@@ -1427,6 +1431,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
14271431
folio_mark_accessed(folio);
14281432

14291433
ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1434+
__iomap_put_folio(iter, pos, bytes, folio);
14301435
if (WARN_ON_ONCE(!ret))
14311436
return -EIO;
14321437

0 commit comments

Comments
 (0)