@@ -1197,8 +1197,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
         bool short_write;
         int err;

-        for (i = 0; i < ap->num_pages; i++)
-                fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
+        for (i = 0; i < ap->num_folios; i++)
+                fuse_wait_on_folio_writeback(inode, ap->folios[i]);

         fuse_write_args_fill(ia, ff, pos, count);
         ia->write.in.flags = fuse_write_flags(iocb);
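The hunk above swaps the per-page writeback wait for a per-folio helper; the helper's body is not part of this diff. A minimal sketch of what fuse_wait_on_folio_writeback() could look like, assuming it simply waits on every page index the folio covers (only the name comes from the hunk, the implementation here is a guess):

/*
 * Sketch only: assumes the helper walks the folio's page range and reuses
 * the existing per-index wait. Not taken from this commit.
 */
static void fuse_wait_on_folio_writeback(struct inode *inode,
                                         struct folio *folio)
{
        pgoff_t index = folio->index;
        pgoff_t last = index + folio_nr_pages(folio) - 1;

        for (; index <= last; index++)
                fuse_wait_on_page_writeback(inode, index);
}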
@@ -1210,10 +1210,10 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
                 err = -EIO;

         short_write = ia->write.out.size < count;
-        offset = ap->descs[0].offset;
+        offset = ap->folio_descs[0].offset;
         count = ia->write.out.size;
-        for (i = 0; i < ap->num_pages; i++) {
-                struct folio *folio = page_folio(ap->pages[i]);
+        for (i = 0; i < ap->num_folios; i++) {
+                struct folio *folio = ap->folios[i];

                 if (err) {
                         folio_clear_uptodate(folio);
@@ -1227,7 +1227,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
                         }
                         offset = 0;
                 }
-                if (ia->write.page_locked && (i == ap->num_pages - 1))
+                if (ia->write.folio_locked && (i == ap->num_folios - 1))
                         folio_unlock(folio);
                 folio_put(folio);
         }
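The fuse_send_write_pages() hunks above use several members that the diff itself does not define: ap->folios, ap->folio_descs, ap->num_folios, ia->write.folio_locked (the folio counterpart of page_locked), and ap->uses_folios set later in fuse_perform_write. A rough sketch of the shape those fields would take on struct fuse_args_pages, mirroring the existing pages/descs/num_pages trio; the exact layout and the struct fuse_folio_desc definition are assumptions:

/* Illustrative only: layout assumed from how the hunks use the fields. */
struct fuse_folio_desc {
        unsigned int length;    /* bytes used within the folio */
        unsigned int offset;    /* starting offset within the folio */
};

struct fuse_args_pages {
        struct fuse_args args;
        /* legacy page-based representation */
        struct page **pages;
        struct fuse_page_desc *descs;
        unsigned int num_pages;
        /* folio-based representation used above */
        struct folio **folios;
        struct fuse_folio_desc *folio_descs;
        unsigned int num_folios;
        bool uses_folios;       /* set by callers that filled folios[] */
};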
@@ -1243,11 +1243,12 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
         struct fuse_args_pages *ap = &ia->ap;
         struct fuse_conn *fc = get_fuse_conn(mapping->host);
         unsigned offset = pos & (PAGE_SIZE - 1);
+        unsigned int nr_pages = 0;
         size_t count = 0;
         int err;

         ap->args.in_pages = true;
-        ap->descs[0].offset = offset;
+        ap->folio_descs[0].offset = offset;

         do {
                 size_t tmp;
@@ -1283,9 +1284,10 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
                 }

                 err = 0;
-                ap->pages[ap->num_pages] = &folio->page;
-                ap->descs[ap->num_pages].length = tmp;
-                ap->num_pages++;
+                ap->folios[ap->num_folios] = folio;
+                ap->folio_descs[ap->num_folios].length = tmp;
+                ap->num_folios++;
+                nr_pages++;

                 count += tmp;
                 pos += tmp;
@@ -1300,13 +1302,13 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
                 if (folio_test_uptodate(folio)) {
                         folio_unlock(folio);
                 } else {
-                        ia->write.page_locked = true;
+                        ia->write.folio_locked = true;
                         break;
                 }
                 if (!fc->big_writes)
                         break;
         } while (iov_iter_count(ii) && count < fc->max_write &&
-                 ap->num_pages < max_pages && offset == 0);
+                 nr_pages < max_pages && offset == 0);

         return count > 0 ? count : err;
 }
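In fuse_fill_write_pages() the loop used to stop once ap->num_pages reached max_pages. num_folios counts folios rather than pages, so the patch keeps a separate local nr_pages and checks that against the page budget instead. The budget comes from fuse_wr_pages() in the caller; roughly, it is the number of pages the byte range [pos, pos + len) touches, clamped to the connection limit. A sketch for context (the exact expression is an assumption, not shown in this diff):

/*
 * Approximation of fuse_wr_pages(): pages touched by [pos, pos + len),
 * clamped to max_pages. Shown for context, not part of this commit.
 */
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
                                         unsigned int max_pages)
{
        return min_t(unsigned int,
                     ((pos + len - 1) >> PAGE_SHIFT) -
                     (pos >> PAGE_SHIFT) + 1,
                     max_pages);
}

With this patch each folio the loop grabs covers a single page, so nr_pages and num_folios advance in lockstep; keeping them separate would only start to matter if the fill path later picks up multi-page folios.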
@@ -1340,8 +1342,9 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
                 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
                                                       fc->max_pages);

-                ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
-                if (!ap->pages) {
+                ap->uses_folios = true;
+                ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->folio_descs);
+                if (!ap->folios) {
                         err = -ENOMEM;
                         break;
                 }
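fuse_folios_alloc() replaces fuse_pages_alloc() here, which in the existing code hands back the page-pointer array and the descriptor array from a single allocation. The folio variant is not shown in this diff; a plausible sketch assuming it mirrors the page version one-for-one:

/*
 * Sketch, assuming fuse_folios_alloc() mirrors fuse_pages_alloc(): one
 * allocation holding nfolios folio pointers followed by nfolios
 * descriptors. Not taken from this commit.
 */
struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags,
                                 struct fuse_folio_desc **desc)
{
        struct folio **folios;

        folios = kzalloc(nfolios * (sizeof(struct folio *) +
                                    sizeof(struct fuse_folio_desc)), flags);
        if (folios)
                *desc = (struct fuse_folio_desc *)(folios + nfolios);

        return folios;
}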
@@ -1363,7 +1366,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
                                         err = -EIO;
                         }
                 }
-                kfree(ap->pages);
+                kfree(ap->folios);
         } while (!err && iov_iter_count(ii));

         fuse_write_update_attr(inode, pos, res);