Skip to content

Commit abb99cf

Browse files
naotakdave
authored and committed
btrfs: properly split extent_map for REQ_OP_ZONE_APPEND
Damien reported a test failure with btrfs/209. The test itself ran fine, but the fsck ran afterwards reported a corrupted filesystem. The filesystem corruption happens because we're splitting an extent and then writing the extent twice. We have to split the extent though, because we're creating too large extents for a REQ_OP_ZONE_APPEND operation. When dumping the extent tree, we can see two EXTENT_ITEMs at the same start address but different lengths. $ btrfs inspect dump-tree /dev/nullb1 -t extent ... item 19 key (269484032 EXTENT_ITEM 126976) itemoff 15470 itemsize 53 refs 1 gen 7 flags DATA extent data backref root FS_TREE objectid 257 offset 786432 count 1 item 20 key (269484032 EXTENT_ITEM 262144) itemoff 15417 itemsize 53 refs 1 gen 7 flags DATA extent data backref root FS_TREE objectid 257 offset 786432 count 1 The duplicated EXTENT_ITEMs originally come from wrongly split extent_map in extract_ordered_extent(). Since extract_ordered_extent() uses create_io_em() to split an existing extent_map, we will have split->orig_start != split->start. Then, it will be logged with non-zero "extent data offset". Finally, the logged entries are replayed into a duplicated EXTENT_ITEM. Introduce and use proper splitting function for extent_map. The function is intended to be simple and specific usage for extract_ordered_extent() e.g. not supporting compression case (we do not allow splitting compressed extent_map anyway). There was a question raised by Qu, in summary why we want to split the extent map (and not the bio): The problem is not the limit on the zone end, which as you mention is the same as the block group end. The problem is that data write use zone append (ZA) operations. ZA BIOs cannot be split so a large extent may need to be processed with multiple ZA BIOs, While that is also true for regular writes, the major difference is that ZA are "nameless" write operation giving back the written sectors on completion. 
And ZA operations may be reordered by the block layer (not intentionally though). Combine both of these characteristics and you can see that the data for a large extent may end up being shuffled when written resulting in data corruption and the impossibility to map the extent to some start sector. To avoid this problem, zoned btrfs uses the principle "one data extent == one ZA BIO". So large extents need to be split. This is unfortunate, but we can revisit this later and optimize, e.g. merge back together the fragments of an extent once written if they actually were written sequentially in the zone. Reported-by: Damien Le Moal <[email protected]> Fixes: d22002f ("btrfs: zoned: split ordered extent when bio is sent") CC: [email protected] # 5.12+ CC: Johannes Thumshirn <[email protected]> Signed-off-by: Naohiro Aota <[email protected]> Signed-off-by: David Sterba <[email protected]>
1 parent 79bd371 commit abb99cf

File tree

1 file changed

+118
-29
lines changed

1 file changed

+118
-29
lines changed

fs/btrfs/inode.c

Lines changed: 118 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -2271,13 +2271,127 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
22712271
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
22722272
}
22732273

2274+
/*
 * Split an extent_map at [start, start + len]
 *
 * This function is intended to be used only for extract_ordered_extent().
 *
 * The single extent_map covering [start, start + len) is replaced by up to
 * three new maps: a "pre" part of @pre bytes, a middle part, and a "post"
 * part of @post bytes, so that each piece matches one split ordered extent
 * (one zone-append BIO).  Compressed extent_maps are not supported (asserted
 * below).
 *
 * Returns 0 on success (or if there is nothing to split), -ENOMEM on
 * allocation failure, or -EIO if no extent_map covering the range is found.
 */
static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
			  u64 pre, u64 post)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	struct extent_map *split_post = NULL;
	int ret = 0;
	int modified;
	unsigned long flags;

	/* Sanity check: no split requested, nothing to do */
	if (pre == 0 && post == 0)
		return 0;

	/*
	 * Allocate every replacement map up front so no allocation happens
	 * (and no partial split needs undoing) under the tree lock.
	 */
	split_pre = alloc_extent_map();
	if (pre)
		split_mid = alloc_extent_map();
	if (post)
		split_post = alloc_extent_map();
	if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
		ret = -ENOMEM;
		goto out;
	}

	/* The pre and post pieces must leave a non-empty middle/main piece */
	ASSERT(pre + post < len);

	lock_extent(&inode->io_tree, start, start + len - 1);
	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	/* The found map must cover exactly the range and be uncompressed */
	ASSERT(em->len == len);
	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);

	/*
	 * Snapshot the flags that the replacement maps will inherit: drop
	 * PINNED from the map being removed and LOGGING from the inherited
	 * copy.
	 */
	flags = em->flags;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	clear_bit(EXTENT_FLAG_LOGGING, &flags);
	modified = !list_empty(&em->list);

	/*
	 * First, replace the em with a new extent_map starting from
	 * em->start
	 */
	split_pre->start = em->start;
	split_pre->len = (pre ? pre : em->len - post);
	split_pre->orig_start = split_pre->start;
	split_pre->block_start = em->block_start;
	split_pre->block_len = split_pre->len;
	split_pre->orig_block_len = split_pre->block_len;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->compress_type = em->compress_type;
	split_pre->generation = em->generation;

	replace_extent_mapping(em_tree, em, split_pre, modified);

	/*
	 * Now we only have an extent_map at:
	 *     [em->start, em->start + pre] if pre != 0
	 *     [em->start, em->start + em->len - post] if pre == 0
	 */

	if (pre) {
		/* Insert the middle extent_map */
		split_mid->start = em->start + pre;
		split_mid->len = em->len - pre - post;
		split_mid->orig_start = split_mid->start;
		split_mid->block_start = em->block_start + pre;
		split_mid->block_len = split_mid->len;
		split_mid->orig_block_len = split_mid->block_len;
		split_mid->ram_bytes = split_mid->len;
		split_mid->flags = flags;
		split_mid->compress_type = em->compress_type;
		split_mid->generation = em->generation;
		add_extent_mapping(em_tree, split_mid, modified);
	}

	if (post) {
		/* Insert the trailing extent_map covering the last @post bytes */
		split_post->start = em->start + em->len - post;
		split_post->len = post;
		split_post->orig_start = split_post->start;
		split_post->block_start = em->block_start + em->len - post;
		split_post->block_len = split_post->len;
		split_post->orig_block_len = split_post->block_len;
		split_post->ram_bytes = split_post->len;
		split_post->flags = flags;
		split_post->compress_type = em->compress_type;
		split_post->generation = em->generation;
		add_extent_mapping(em_tree, split_post, modified);
	}

	/* Once for us */
	free_extent_map(em);
	/* Once for the tree */
	free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	unlock_extent(&inode->io_tree, start, start + len - 1);
out:
	/*
	 * NOTE(review): on the success path these drop only our allocation
	 * references; the tree is assumed to hold its own reference taken at
	 * insertion time — confirm against replace/add_extent_mapping.
	 */
	free_extent_map(split_pre);
	free_extent_map(split_mid);
	free_extent_map(split_post);

	return ret;
}
2388+
22742389
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
22752390
struct bio *bio, loff_t file_offset)
22762391
{
22772392
struct btrfs_ordered_extent *ordered;
2278-
struct extent_map *em = NULL, *em_new = NULL;
2279-
struct extent_map_tree *em_tree = &inode->extent_tree;
22802393
u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
2394+
u64 file_len;
22812395
u64 len = bio->bi_iter.bi_size;
22822396
u64 end = start + len;
22832397
u64 ordered_end;
@@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
23172431
goto out;
23182432
}
23192433

2434+
file_len = ordered->num_bytes;
23202435
pre = start - ordered->disk_bytenr;
23212436
post = ordered_end - end;
23222437

23232438
ret = btrfs_split_ordered_extent(ordered, pre, post);
23242439
if (ret)
23252440
goto out;
2326-
2327-
read_lock(&em_tree->lock);
2328-
em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
2329-
if (!em) {
2330-
read_unlock(&em_tree->lock);
2331-
ret = -EIO;
2332-
goto out;
2333-
}
2334-
read_unlock(&em_tree->lock);
2335-
2336-
ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
2337-
/*
2338-
* We cannot reuse em_new here but have to create a new one, as
2339-
* unpin_extent_cache() expects the start of the extent map to be the
2340-
* logical offset of the file, which does not hold true anymore after
2341-
* splitting.
2342-
*/
2343-
em_new = create_io_em(inode, em->start + pre, len,
2344-
em->start + pre, em->block_start + pre, len,
2345-
len, len, BTRFS_COMPRESS_NONE,
2346-
BTRFS_ORDERED_REGULAR);
2347-
if (IS_ERR(em_new)) {
2348-
ret = PTR_ERR(em_new);
2349-
goto out;
2350-
}
2351-
free_extent_map(em_new);
2441+
ret = split_zoned_em(inode, file_offset, file_len, pre, post);
23522442

23532443
out:
2354-
free_extent_map(em);
23552444
btrfs_put_ordered_extent(ordered);
23562445

23572446
return errno_to_blk_status(ret);

0 commit comments

Comments
 (0)