21
21
#include <linux/filelock.h>
22
22
#include <linux/splice.h>
23
23
#include <linux/task_io_accounting_ops.h>
24
+ #include <linux/iomap.h>
24
25
25
26
static int fuse_send_open (struct fuse_mount * fm , u64 nodeid ,
26
27
unsigned int open_flags , int opcode ,
@@ -788,12 +789,16 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
788
789
}
789
790
}
790
791
791
- static int fuse_do_readfolio (struct file * file , struct folio * folio )
792
+ static int fuse_do_readfolio (struct file * file , struct folio * folio ,
793
+ size_t off , size_t len )
792
794
{
793
795
struct inode * inode = folio -> mapping -> host ;
794
796
struct fuse_mount * fm = get_fuse_mount (inode );
795
- loff_t pos = folio_pos (folio );
796
- struct fuse_folio_desc desc = { .length = folio_size (folio ) };
797
+ loff_t pos = folio_pos (folio ) + off ;
798
+ struct fuse_folio_desc desc = {
799
+ .offset = off ,
800
+ .length = len ,
801
+ };
797
802
struct fuse_io_args ia = {
798
803
.ap .args .page_zeroing = true,
799
804
.ap .args .out_pages = true,
@@ -820,8 +825,6 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio)
820
825
if (res < desc .length )
821
826
fuse_short_read (inode , attr_ver , res , & ia .ap );
822
827
823
- folio_mark_uptodate (folio );
824
-
825
828
return 0 ;
826
829
}
827
830
@@ -834,13 +837,26 @@ static int fuse_read_folio(struct file *file, struct folio *folio)
834
837
if (fuse_is_bad (inode ))
835
838
goto out ;
836
839
837
- err = fuse_do_readfolio (file , folio );
840
+ err = fuse_do_readfolio (file , folio , 0 , folio_size (folio ));
841
+ if (!err )
842
+ folio_mark_uptodate (folio );
843
+
838
844
fuse_invalidate_atime (inode );
839
845
out :
840
846
folio_unlock (folio );
841
847
return err ;
842
848
}
843
849
850
+ static int fuse_iomap_read_folio_range (const struct iomap_iter * iter ,
851
+ struct folio * folio , loff_t pos ,
852
+ size_t len )
853
+ {
854
+ struct file * file = iter -> private ;
855
+ size_t off = offset_in_folio (folio , pos );
856
+
857
+ return fuse_do_readfolio (file , folio , off , len );
858
+ }
859
+
844
860
static void fuse_readpages_end (struct fuse_mount * fm , struct fuse_args * args ,
845
861
int err )
846
862
{
@@ -1375,6 +1391,24 @@ static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
1375
1391
}
1376
1392
}
1377
1393
1394
/*
 * Per-range hooks for iomap buffered writes.  Only read_folio_range is
 * needed: it lets iomap read just the non-uptodate sub-ranges of a large
 * folio instead of the whole folio.
 */
static const struct iomap_write_ops fuse_iomap_write_ops = {
	.read_folio_range = fuse_iomap_read_folio_range,
};
1397
+
1398
+ static int fuse_iomap_begin (struct inode * inode , loff_t offset , loff_t length ,
1399
+ unsigned int flags , struct iomap * iomap ,
1400
+ struct iomap * srcmap )
1401
+ {
1402
+ iomap -> type = IOMAP_MAPPED ;
1403
+ iomap -> length = length ;
1404
+ iomap -> offset = offset ;
1405
+ return 0 ;
1406
+ }
1407
+
1408
/* Mapping ops for iomap_file_buffered_write(); no iomap_end is required. */
static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin = fuse_iomap_begin,
};
1411
+
1378
1412
static ssize_t fuse_cache_write_iter (struct kiocb * iocb , struct iov_iter * from )
1379
1413
{
1380
1414
struct file * file = iocb -> ki_filp ;
@@ -1384,6 +1418,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
1384
1418
struct inode * inode = mapping -> host ;
1385
1419
ssize_t err , count ;
1386
1420
struct fuse_conn * fc = get_fuse_conn (inode );
1421
+ bool writeback = false;
1387
1422
1388
1423
if (fc -> writeback_cache ) {
1389
1424
/* Update size (EOF optimization) and mode (SUID clearing) */
@@ -1392,16 +1427,11 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
1392
1427
if (err )
1393
1428
return err ;
1394
1429
1395
- if (fc -> handle_killpriv_v2 &&
1396
- setattr_should_drop_suidgid (idmap ,
1397
- file_inode (file ))) {
1398
- goto writethrough ;
1399
- }
1400
-
1401
- return generic_file_write_iter (iocb , from );
1430
+ if (!fc -> handle_killpriv_v2 ||
1431
+ !setattr_should_drop_suidgid (idmap , file_inode (file )))
1432
+ writeback = true;
1402
1433
}
1403
1434
1404
- writethrough :
1405
1435
inode_lock (inode );
1406
1436
1407
1437
err = count = generic_write_checks (iocb , from );
@@ -1420,6 +1450,15 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
1420
1450
goto out ;
1421
1451
written = direct_write_fallback (iocb , from , written ,
1422
1452
fuse_perform_write (iocb , from ));
1453
+ } else if (writeback ) {
1454
+ /*
1455
+ * Use iomap so that we can do granular uptodate reads
1456
+ * and granular dirty tracking for large folios.
1457
+ */
1458
+ written = iomap_file_buffered_write (iocb , from ,
1459
+ & fuse_iomap_ops ,
1460
+ & fuse_iomap_write_ops ,
1461
+ file );
1423
1462
} else {
1424
1463
written = fuse_perform_write (iocb , from );
1425
1464
}
@@ -2209,84 +2248,6 @@ static int fuse_writepages(struct address_space *mapping,
2209
2248
return err ;
2210
2249
}
2211
2250
2212
/*
 * It's worthy to make sure that space is reserved on disk for the write,
 * but how to implement it without killing performance need more thinking.
 */
/*
 * Legacy ->write_begin for the writeback-cache path (pre-iomap).
 *
 * Looks up (or creates) the folio covering @pos and returns it locked in
 * *foliop.  A read from the server is avoided when the folio is already
 * uptodate, when the write covers the whole folio, or when the folio lies
 * entirely beyond EOF (in which case only the head up to @pos is zeroed).
 * On failure the folio is unlocked and released before returning.
 * NOTE(review): calls the two-argument fuse_do_readfolio() that predates
 * the off/len variant introduced by the iomap conversion.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct folio *folio;
	loff_t fsize;
	int err = -ENOMEM;

	/* Only the writeback-cache mode installs this callback. */
	WARN_ON(!fc->writeback_cache);

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		goto error;

	/* Uptodate folio, or a write covering the whole folio: no read needed. */
	if (folio_test_uptodate(folio) || len >= folio_size(folio))
		goto success;
	/*
	 * Check if the start of this folio comes after the end of file,
	 * in which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= folio_pos(folio)) {
		size_t off = offset_in_folio(folio, pos);
		if (off)
			folio_zero_segment(folio, 0, off);
		goto success;
	}
	err = fuse_do_readfolio(file, folio);
	if (err)
		goto cleanup;
success:
	*foliop = folio;
	return 0;

cleanup:
	folio_unlock(folio);
	folio_put(folio);
error:
	return err;
}
2258
-
2259
/*
 * Legacy ->write_end companion to fuse_write_begin() (pre-iomap).
 *
 * Finishes a buffered write of @copied bytes at @pos: zero-fills the tail
 * of a partially written, not-yet-uptodate page, marks the folio uptodate
 * and dirty, and extends i_size when the write went past EOF.  Always
 * unlocks and releases the folio; returns the number of bytes copied.
 */
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	struct inode *inode = folio->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	pos += copied;
	if (!folio_test_uptodate(folio)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = pos & ~PAGE_MASK;
		if (endoff)
			folio_zero_segment(folio, endoff, PAGE_SIZE);
		folio_mark_uptodate(folio);
	}

	/* Writes past EOF extend the file size under the locked folio. */
	if (pos > inode->i_size)
		i_size_write(inode, pos);

	folio_mark_dirty(folio);

unlock:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}
2289
-
2290
2251
static int fuse_launder_folio (struct folio * folio )
2291
2252
{
2292
2253
int err = 0 ;
@@ -3145,11 +3106,10 @@ static const struct address_space_operations fuse_file_aops = {
3145
3106
.writepages = fuse_writepages ,
3146
3107
.launder_folio = fuse_launder_folio ,
3147
3108
.dirty_folio = filemap_dirty_folio ,
3109
+ .release_folio = iomap_release_folio ,
3148
3110
.migrate_folio = filemap_migrate_folio ,
3149
3111
.bmap = fuse_bmap ,
3150
3112
.direct_IO = fuse_direct_IO ,
3151
- .write_begin = fuse_write_begin ,
3152
- .write_end = fuse_write_end ,
3153
3113
};
3154
3114
3155
3115
void fuse_init_file_inode (struct inode * inode , unsigned int flags )
0 commit comments