@@ -4569,7 +4569,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 			    loff_t len, int mode)
 {
 	struct inode *inode = file_inode(file);
-	struct address_space *mapping = file->f_mapping;
 	handle_t *handle = NULL;
 	loff_t new_size = 0;
 	loff_t end = offset + len;
@@ -4593,23 +4592,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 			return ret;
 	}
 
-	/* Wait all existing dio workers, newcomers will block on i_rwsem */
-	inode_dio_wait(inode);
-
-	ret = file_modified(file);
-	if (ret)
-		return ret;
-
-	/*
-	 * Prevent page faults from reinstantiating pages we have released
-	 * from page cache.
-	 */
-	filemap_invalidate_lock(mapping);
-
-	ret = ext4_break_layouts(inode);
-	if (ret)
-		goto out_invalidate_lock;
-
 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
 	/* Preallocate the range including the unaligned edges */
 	if (!IS_ALIGNED(offset | end, blocksize)) {
@@ -4619,17 +4601,17 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 		ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
 					     new_size, flags);
 		if (ret)
-			goto out_invalidate_lock;
+			return ret;
 	}
 
 	ret = ext4_update_disksize_before_punch(inode, offset, len);
 	if (ret)
-		goto out_invalidate_lock;
+		return ret;
 
 	/* Now release the pages and zero block aligned part of pages */
 	ret = ext4_truncate_page_cache_block_range(inode, offset, end);
 	if (ret)
-		goto out_invalidate_lock;
+		return ret;
 
 	/* Zero range excluding the unaligned edges */
 	start_lblk = EXT4_B_TO_LBLK(inode, offset);
@@ -4641,11 +4623,11 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 		ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
 					     new_size, flags);
 		if (ret)
-			goto out_invalidate_lock;
+			return ret;
 	}
 	/* Finish zeroing out if it doesn't contain partial block */
 	if (IS_ALIGNED(offset | end, blocksize))
-		goto out_invalidate_lock;
+		return ret;
 
 	/*
 	 * In worst case we have to writeout two nonadjacent unwritten
@@ -4658,7 +4640,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
 		ext4_std_error(inode->i_sb, ret);
-		goto out_invalidate_lock;
+		return ret;
 	}
 
 	/* Zero out partial block at the edges of the range */
@@ -4678,8 +4660,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
 out_handle:
 	ext4_journal_stop(handle);
-out_invalidate_lock:
-	filemap_invalidate_unlock(mapping);
 	return ret;
 }
 
@@ -4712,13 +4692,6 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
 			goto out;
 	}
 
-	/* Wait all existing dio workers, newcomers will block on i_rwsem */
-	inode_dio_wait(inode);
-
-	ret = file_modified(file);
-	if (ret)
-		goto out;
-
 	ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
 				     EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
 	if (ret)
@@ -4743,6 +4716,7 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
 	struct inode *inode = file_inode(file);
+	struct address_space *mapping = file->f_mapping;
 	int ret;
 
 	/*
@@ -4766,6 +4740,29 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (ret)
 		goto out_inode_lock;
 
+	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+	inode_dio_wait(inode);
+
+	ret = file_modified(file);
+	if (ret)
+		goto out_inode_lock;
+
+	if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
+		ret = ext4_do_fallocate(file, offset, len, mode);
+		goto out_inode_lock;
+	}
+
+	/*
+	 * Follow-up operations will drop page cache, hold invalidate lock
+	 * to prevent page faults from reinstantiating pages we have
+	 * released from page cache.
+	 */
+	filemap_invalidate_lock(mapping);
+
+	ret = ext4_break_layouts(inode);
+	if (ret)
+		goto out_invalidate_lock;
+
 	if (mode & FALLOC_FL_PUNCH_HOLE)
 		ret = ext4_punch_hole(file, offset, len);
 	else if (mode & FALLOC_FL_COLLAPSE_RANGE)
@@ -4775,7 +4772,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	else if (mode & FALLOC_FL_ZERO_RANGE)
 		ret = ext4_zero_range(file, offset, len, mode);
 	else
-		ret = ext4_do_fallocate(file, offset, len, mode);
+		ret = -EOPNOTSUPP;
+
+out_invalidate_lock:
+	filemap_invalidate_unlock(mapping);
 out_inode_lock:
 	inode_unlock(inode);
 	return ret;
@@ -5297,23 +5297,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
 	if (end >= inode->i_size)
 		return -EINVAL;
 
-	/* Wait for existing dio to complete */
-	inode_dio_wait(inode);
-
-	ret = file_modified(file);
-	if (ret)
-		return ret;
-
-	/*
-	 * Prevent page faults from reinstantiating pages we have released from
-	 * page cache.
-	 */
-	filemap_invalidate_lock(mapping);
-
-	ret = ext4_break_layouts(inode);
-	if (ret)
-		goto out_invalidate_lock;
-
 	/*
 	 * Write tail of the last page before removed range and data that
 	 * will be shifted since they will get removed from the page cache
@@ -5327,16 +5310,15 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
 	if (!ret)
 		ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
 	if (ret)
-		goto out_invalidate_lock;
+		return ret;
 
 	truncate_pagecache(inode, start);
 
 	credits = ext4_writepage_trans_blocks(inode);
 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_invalidate_lock;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
 	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
 
 	start_lblk = offset >> inode->i_blkbits;
@@ -5375,8 +5357,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
 
 out_handle:
 	ext4_journal_stop(handle);
-out_invalidate_lock:
-	filemap_invalidate_unlock(mapping);
 	return ret;
 }
 
@@ -5417,40 +5397,22 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
 	if (len > inode->i_sb->s_maxbytes - inode->i_size)
 		return -EFBIG;
 
-	/* Wait for existing dio to complete */
-	inode_dio_wait(inode);
-
-	ret = file_modified(file);
-	if (ret)
-		return ret;
-
-	/*
-	 * Prevent page faults from reinstantiating pages we have released from
-	 * page cache.
-	 */
-	filemap_invalidate_lock(mapping);
-
-	ret = ext4_break_layouts(inode);
-	if (ret)
-		goto out_invalidate_lock;
-
 	/*
 	 * Write out all dirty pages. Need to round down to align start offset
 	 * to page size boundary for page size > block size.
 	 */
 	start = round_down(offset, PAGE_SIZE);
 	ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
 	if (ret)
-		goto out_invalidate_lock;
+		return ret;
 
 	truncate_pagecache(inode, start);
 
 	credits = ext4_writepage_trans_blocks(inode);
 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_invalidate_lock;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
 	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
 
 	/* Expand file to avoid data loss if there is error while shifting */
@@ -5521,8 +5483,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
 
 out_handle:
 	ext4_journal_stop(handle);
-out_invalidate_lock:
-	filemap_invalidate_unlock(mapping);
 	return ret;
 }
 
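Taken together, these hunks hoist the shared preparation steps (inode_dio_wait(), file_modified(), filemap_invalidate_lock() and ext4_break_layouts()) out of the individual fallocate helpers and into ext4_fallocate(), which then dispatches on the FALLOC_FL_* mode. For reference, the same modes can be exercised from user space; the sketch below is not part of the patch, only an illustration, and it assumes a Linux system, an ext4 mount in the working directory, and a 4 KiB filesystem block size (file name and block size are hypothetical placeholders).

/* falloc_demo.c - exercise the fallocate(2) modes routed by ext4_fallocate().
 * Build: gcc -O2 -o falloc_demo falloc_demo.c ; run on an ext4 filesystem.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void check(int ret, const char *what)
{
	if (ret == -1) {
		fprintf(stderr, "%s: %s\n", what, strerror(errno));
		exit(EXIT_FAILURE);
	}
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "falloc_demo.dat";
	const off_t blk = 4096;	/* assumed filesystem block size */
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);

	check(fd, "open");

	/* Plain preallocation: the ext4_do_fallocate() path. */
	check(fallocate(fd, 0, 0, 16 * blk), "allocate");

	/* Zero a sub-range: the ext4_zero_range() path. */
	check(fallocate(fd, FALLOC_FL_ZERO_RANGE, 2 * blk, 2 * blk),
	      "zero range");

	/* Punch a hole (must be paired with KEEP_SIZE): ext4_punch_hole(). */
	check(fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			4 * blk, 2 * blk), "punch hole");

	/* Collapse/insert need block-aligned offset and length, and the
	 * collapsed range must end before EOF. */
	check(fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 8 * blk, 2 * blk),
	      "collapse range");
	check(fallocate(fd, FALLOC_FL_INSERT_RANGE, 8 * blk, 2 * blk),
	      "insert range");

	close(fd);
	return 0;
}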