19
19
/*
 * Type-erased handle to the next pcluster in a chain.
 * NOTE(review): stored as a bare void * — presumably it encodes either a
 * pcluster pointer or a sentinel/tagged value; confirm against the
 * Z_EROFS_PCLUSTER_* definitions elsewhere in the file.
 */
typedef void *z_erofs_next_pcluster_t;
20
20
21
21
/*
 * One compressed-buffer slot of a pcluster.
 *
 * After the folio conversion, the backing memory may be referenced either
 * as a struct page or as a struct folio; the anonymous union lets both
 * views alias the same pointer slot (callers such as
 * z_erofs_fill_bio_vec() read ->folio and derive ->page from it).
 *
 * @page:   page-based view of the buffer (aliases @folio)
 * @folio:  folio-based view of the same buffer
 * @offset: byte offset within the buffer — assumed; confirm at use sites
 * @end:    end position of valid data — assumed; confirm at use sites
 */
struct z_erofs_bvec {
	union {
		struct page *page;
		struct folio *folio;
	};
	int offset;
	unsigned int end;
};
@@ -1420,33 +1423,32 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1420
1423
struct page * page ;
1421
1424
int bs = i_blocksize (f -> inode );
1422
1425
1423
- /* Except for inplace pages , the entire page can be used for I/Os */
1426
+ /* Except for inplace folios , the entire folio can be used for I/Os */
1424
1427
bvec -> bv_offset = 0 ;
1425
1428
bvec -> bv_len = PAGE_SIZE ;
1426
1429
repeat :
1427
1430
spin_lock (& pcl -> obj .lockref .lock );
1428
1431
zbv = pcl -> compressed_bvecs [nr ];
1429
- page = zbv .page ;
1430
1432
spin_unlock (& pcl -> obj .lockref .lock );
1431
- if (!page )
1432
- goto out_allocpage ;
1433
+ if (!zbv . folio )
1434
+ goto out_allocfolio ;
1433
1435
1434
- bvec -> bv_page = page ;
1435
- DBG_BUGON (z_erofs_is_shortlived_page (page ));
1436
+ bvec -> bv_page = & zbv . folio -> page ;
1437
+ DBG_BUGON (z_erofs_is_shortlived_page (bvec -> bv_page ));
1436
1438
/*
1437
- * Handle preallocated cached pages . We tried to allocate such pages
1439
+ * Handle preallocated cached folios . We tried to allocate such folios
1438
1440
* without triggering direct reclaim. If allocation failed, inplace
1439
- * file-backed pages will be used instead.
1441
+ * file-backed folios will be used instead.
1440
1442
*/
1441
- if (page -> private == Z_EROFS_PREALLOCATED_PAGE ) {
1442
- set_page_private ( page , 0 ) ;
1443
+ if (zbv . folio -> private == ( void * ) Z_EROFS_PREALLOCATED_PAGE ) {
1444
+ zbv . folio -> private = 0 ;
1443
1445
tocache = true;
1444
1446
goto out_tocache ;
1445
1447
}
1446
1448
1447
- mapping = READ_ONCE (page -> mapping );
1449
+ mapping = READ_ONCE (zbv . folio -> mapping );
1448
1450
/*
1449
- * File-backed pages for inplace I/Os are all locked steady,
1451
+ * File-backed folios for inplace I/Os are all locked steady,
1450
1452
* therefore it is impossible for `mapping` to be NULL.
1451
1453
*/
1452
1454
if (mapping && mapping != mc ) {
@@ -1456,22 +1458,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1456
1458
return ;
1457
1459
}
1458
1460
1459
- lock_page (page );
1460
- /* the cached page is still in managed cache */
1461
- if (page -> mapping == mc ) {
1461
+ folio_lock (zbv .folio );
1462
+ if (zbv .folio -> mapping == mc ) {
1462
1463
/*
1463
- * The cached page is still available but without a valid
1464
- * `->private` pcluster hint. Let's reconnect them.
1464
+ * The cached folio is still in managed cache but without
1465
+ * a valid `->private` pcluster hint. Let's reconnect them.
1465
1466
*/
1466
- if (!PagePrivate ( page )) {
1467
- /* compressed_bvecs[] already takes a ref */
1468
- attach_page_private ( page , pcl );
1469
- put_page ( page );
1467
+ if (!folio_test_private ( zbv . folio )) {
1468
+ folio_attach_private ( zbv . folio , pcl );
1469
+ /* compressed_bvecs[] already takes a ref before */
1470
+ folio_put ( zbv . folio );
1470
1471
}
1471
1472
1472
1473
/* no need to submit if it is already up-to-date */
1473
- if (PageUptodate ( page )) {
1474
- unlock_page ( page );
1474
+ if (folio_test_uptodate ( zbv . folio )) {
1475
+ folio_unlock ( zbv . folio );
1475
1476
bvec -> bv_page = NULL ;
1476
1477
}
1477
1478
return ;
@@ -1481,32 +1482,32 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1481
1482
* It has been truncated, so it's unsafe to reuse this one. Let's
1482
1483
* allocate a new page for compressed data.
1483
1484
*/
1484
- DBG_BUGON (page -> mapping );
1485
+ DBG_BUGON (zbv . folio -> mapping );
1485
1486
tocache = true;
1486
- unlock_page ( page );
1487
- put_page ( page );
1488
- out_allocpage :
1487
+ folio_unlock ( zbv . folio );
1488
+ folio_put ( zbv . folio );
1489
+ out_allocfolio :
1489
1490
page = erofs_allocpage (& f -> pagepool , gfp | __GFP_NOFAIL );
1490
1491
spin_lock (& pcl -> obj .lockref .lock );
1491
- if (pcl -> compressed_bvecs [nr ].page ) {
1492
+ if (pcl -> compressed_bvecs [nr ].folio ) {
1492
1493
erofs_pagepool_add (& f -> pagepool , page );
1493
1494
spin_unlock (& pcl -> obj .lockref .lock );
1494
1495
cond_resched ();
1495
1496
goto repeat ;
1496
1497
}
1497
- pcl -> compressed_bvecs [nr ].page = page ;
1498
+ pcl -> compressed_bvecs [nr ].folio = zbv . folio = page_folio ( page ) ;
1498
1499
spin_unlock (& pcl -> obj .lockref .lock );
1499
1500
bvec -> bv_page = page ;
1500
1501
out_tocache :
1501
1502
if (!tocache || bs != PAGE_SIZE ||
1502
- add_to_page_cache_lru ( page , mc , pcl -> obj .index + nr , gfp )) {
1503
- /* turn into a temporary shortlived page (1 ref) */
1504
- set_page_private ( page , Z_EROFS_SHORTLIVED_PAGE ) ;
1503
+ filemap_add_folio ( mc , zbv . folio , pcl -> obj .index + nr , gfp )) {
1504
+ /* turn into a temporary shortlived folio (1 ref) */
1505
+ zbv . folio -> private = ( void * ) Z_EROFS_SHORTLIVED_PAGE ;
1505
1506
return ;
1506
1507
}
1507
- attach_page_private ( page , pcl );
1508
+ folio_attach_private ( zbv . folio , pcl );
1508
1509
/* drop a refcount added by allocpage (then 2 refs in total here) */
1509
- put_page ( page );
1510
+ folio_put ( zbv . folio );
1510
1511
}
1511
1512
1512
1513
static struct z_erofs_decompressqueue * jobqueue_init (struct super_block * sb ,
0 commit comments