@@ -19,10 +19,7 @@
 typedef void *z_erofs_next_pcluster_t;
 
 struct z_erofs_bvec {
-	union {
-		struct page *page;
-		struct folio *folio;
-	};
+	struct page *page;
 	int offset;
 	unsigned int end;
 };
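With the union gone, a bvec slot stores only the page pointer and the owning folio is derived on demand. A minimal sketch of that pattern, assuming a hypothetical helper (z_erofs_bvec_folio() is not part of this patch):

	#include <linux/mm.h>	/* page_folio() */

	/* Hypothetical helper: recover the folio backing a bvec slot. */
	static inline struct folio *
	z_erofs_bvec_folio(const struct z_erofs_bvec *bvec)
	{
		return bvec->page ? page_folio(bvec->page) : NULL;
	}

page_folio() always resolves to the head folio, so this stays correct even if the cached page turns out to be a tail page of a large folio.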
@@ -617,42 +614,41 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 }
 
-/* called by erofs_shrinker to get rid of all cached compressed bvecs */
+/* (erofs_shrinker) disconnect cached encoded data with pclusters */
 int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
 					struct erofs_workgroup *grp)
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+	struct folio *folio;
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	/* There is no actice user since the pcluster is now freezed */
+	/* Each cached folio contains one page unless bs > ps is supported */
 	for (i = 0; i < pclusterpages; ++i) {
-		struct folio *folio = pcl->compressed_bvecs[i].folio;
+		if (pcl->compressed_bvecs[i].page) {
+			folio = page_folio(pcl->compressed_bvecs[i].page);
+			/* Avoid reclaiming or migrating this folio */
+			if (!folio_trylock(folio))
+				return -EBUSY;
 
-		if (!folio)
-			continue;
-
-		/* Avoid reclaiming or migrating this folio */
-		if (!folio_trylock(folio))
-			return -EBUSY;
-
-		if (!erofs_folio_is_managed(sbi, folio))
-			continue;
-		pcl->compressed_bvecs[i].folio = NULL;
-		folio_detach_private(folio);
-		folio_unlock(folio);
+			if (!erofs_folio_is_managed(sbi, folio))
+				continue;
+			pcl->compressed_bvecs[i].page = NULL;
+			folio_detach_private(folio);
+			folio_unlock(folio);
+		}
 	}
 	return 0;
 }
 
 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
-	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+	struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
+	struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
 	bool ret;
-	int i;
 
 	if (!folio_test_private(folio))
 		return true;
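The rewritten shrinker loop keeps the trylock-or-bail behavior: folio_trylock() never sleeps, so on contention the shrinker returns -EBUSY and lets the caller retry later rather than blocking. A standalone sketch of that pattern (drop_cached_folios() and the folios[] array are illustrative only, standing in for pcl->compressed_bvecs[]):

	#include <linux/pagemap.h>	/* folio_trylock(), folio_detach_private() */

	static int drop_cached_folios(struct folio **folios, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			struct folio *folio = folios[i];

			if (!folio)
				continue;
			/* never sleep in shrinker context: back off instead */
			if (!folio_trylock(folio))
				return -EBUSY;
			folios[i] = NULL;	/* disconnect from the owner */
			folio_detach_private(folio);
			folio_unlock(folio);
		}
		return 0;
	}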
@@ -661,9 +657,9 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 	spin_lock(&pcl->obj.lockref.lock);
 	if (pcl->obj.lockref.count <= 0) {
 		DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-		for (i = 0; i < pclusterpages; ++i) {
-			if (pcl->compressed_bvecs[i].folio == folio) {
-				pcl->compressed_bvecs[i].folio = NULL;
+		for (; bvec < end; ++bvec) {
+			if (bvec->page && page_folio(bvec->page) == folio) {
+				bvec->page = NULL;
 				folio_detach_private(folio);
 				ret = true;
 				break;
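The release path now walks [bvec, end) with pointers instead of indexing, which also drops the two local counters. An equivalent standalone form (find_and_clear() is a made-up name for illustration):

	/* Scan [base, base + n) for the slot backing @victim and clear it. */
	static bool find_and_clear(struct z_erofs_bvec *base, unsigned int n,
				   struct folio *victim)
	{
		struct z_erofs_bvec *bvec = base, *end = base + n;

		for (; bvec < end; ++bvec) {
			if (bvec->page && page_folio(bvec->page) == victim) {
				bvec->page = NULL;
				return true;
			}
		}
		return false;
	}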
@@ -1062,7 +1058,7 @@ static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
 
 static bool z_erofs_page_is_invalidated(struct page *page)
 {
-	return !page->mapping && !z_erofs_is_shortlived_page(page);
+	return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
 }
 
 struct z_erofs_decompress_backend {
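The mapping is now read through page_folio(), presumably because ->mapping is only meaningful on the head page: tail pages of a large folio reuse that field for other state. A reduced illustration (the helper name is hypothetical):

	/* Read the mapping from the folio so the check also holds for a
	 * tail page of a large folio. */
	static bool page_backed_by_mapping(struct page *page)
	{
		return page_folio(page)->mapping != NULL;
	}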
@@ -1415,7 +1411,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	bool tocache = false;
 	struct z_erofs_bvec zbv;
 	struct address_space *mapping;
-	struct page *page;
+	struct folio *folio;
 	int bs = i_blocksize(f->inode);
 
 	/* Except for inplace folios, the entire folio can be used for I/Os */
@@ -1425,23 +1421,25 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	spin_lock(&pcl->obj.lockref.lock);
 	zbv = pcl->compressed_bvecs[nr];
 	spin_unlock(&pcl->obj.lockref.lock);
-	if (!zbv.folio)
+	if (!zbv.page)
 		goto out_allocfolio;
 
-	bvec->bv_page = &zbv.folio->page;
+	bvec->bv_page = zbv.page;
 	DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
+
+	folio = page_folio(zbv.page);
 	/*
 	 * Handle preallocated cached folios. We tried to allocate such folios
 	 * without triggering direct reclaim. If allocation failed, inplace
 	 * file-backed folios will be used instead.
 	 */
-	if (zbv.folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
-		zbv.folio->private = 0;
+	if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
+		folio->private = 0;
 		tocache = true;
 		goto out_tocache;
 	}
 
-	mapping = READ_ONCE(zbv.folio->mapping);
+	mapping = READ_ONCE(folio->mapping);
 	/*
 	 * File-backed folios for inplace I/Os are all locked steady,
 	 * therefore it is impossible for `mapping` to be NULL.
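Note the snapshot-under-lock idiom at the top of this hunk: the bvec is copied out by value while holding the pcluster lock, and the folio is derived afterwards without the lock held. A sketch of just that step (snapshot_cached_folio() is a hypothetical helper reusing names from the patch):

	static struct folio *snapshot_cached_folio(struct z_erofs_pcluster *pcl,
						   unsigned int nr)
	{
		struct z_erofs_bvec zbv;

		spin_lock(&pcl->obj.lockref.lock);
		zbv = pcl->compressed_bvecs[nr];	/* struct copy: stable snapshot */
		spin_unlock(&pcl->obj.lockref.lock);
		return zbv.page ? page_folio(zbv.page) : NULL;
	}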
@@ -1453,21 +1451,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 		return;
 	}
 
-	folio_lock(zbv.folio);
-	if (zbv.folio->mapping == mc) {
+	folio_lock(folio);
+	if (folio->mapping == mc) {
 		/*
 		 * The cached folio is still in managed cache but without
 		 * a valid `->private` pcluster hint. Let's reconnect them.
 		 */
-		if (!folio_test_private(zbv.folio)) {
-			folio_attach_private(zbv.folio, pcl);
+		if (!folio_test_private(folio)) {
+			folio_attach_private(folio, pcl);
 			/* compressed_bvecs[] already takes a ref before */
-			folio_put(zbv.folio);
+			folio_put(folio);
 		}
 
 		/* no need to submit if it is already up-to-date */
-		if (folio_test_uptodate(zbv.folio)) {
-			folio_unlock(zbv.folio);
+		if (folio_test_uptodate(folio)) {
+			folio_unlock(folio);
 			bvec->bv_page = NULL;
 		}
 		return;
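This hunk only swaps `zbv.folio` for the derived local `folio`; the reconnect-or-skip logic is unchanged. A rough standalone form of it (cached_folio_uptodate() is a hypothetical helper; the caller is assumed to hold the folio lock):

	/* Re-attach the pcluster hint if it was lost, then report whether
	 * the read can be skipped because the folio is already up to date. */
	static bool cached_folio_uptodate(struct folio *folio,
					  struct z_erofs_pcluster *pcl)
	{
		if (!folio_test_private(folio)) {
			folio_attach_private(folio, pcl);
			folio_put(folio);	/* compressed_bvecs[] already holds a ref */
		}
		return folio_test_uptodate(folio);
	}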
@@ -1477,32 +1475,31 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	 * It has been truncated, so it's unsafe to reuse this one. Let's
 	 * allocate a new page for compressed data.
 	 */
-	DBG_BUGON(zbv.folio->mapping);
+	DBG_BUGON(folio->mapping);
 	tocache = true;
-	folio_unlock(zbv.folio);
-	folio_put(zbv.folio);
+	folio_unlock(folio);
+	folio_put(folio);
 out_allocfolio:
-	page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
+	zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
 	spin_lock(&pcl->obj.lockref.lock);
-	if (pcl->compressed_bvecs[nr].folio) {
-		erofs_pagepool_add(&f->pagepool, page);
+	if (pcl->compressed_bvecs[nr].page) {
+		erofs_pagepool_add(&f->pagepool, zbv.page);
 		spin_unlock(&pcl->obj.lockref.lock);
 		cond_resched();
 		goto repeat;
 	}
-	pcl->compressed_bvecs[nr].folio = zbv.folio = page_folio(page);
+	bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page;
+	folio = page_folio(zbv.page);
+	/* first mark it as a temporary shortlived folio (now 1 ref) */
+	folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
 	spin_unlock(&pcl->obj.lockref.lock);
-	bvec->bv_page = page;
 out_tocache:
 	if (!tocache || bs != PAGE_SIZE ||
-	    filemap_add_folio(mc, zbv.folio, pcl->obj.index + nr, gfp)) {
-		/* turn into a temporary shortlived folio (1 ref) */
-		zbv.folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
+	    filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp))
 		return;
-	}
-	folio_attach_private(zbv.folio, pcl);
+	folio_attach_private(folio, pcl);
 	/* drop a refcount added by allocpage (then 2 refs in total here) */
-	folio_put(zbv.folio);
+	folio_put(folio);
 }
 
 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
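The out_allocfolio path above allocates without the pcluster lock and then rechecks the slot under the lock, recycling the new page if another thread filled it first (the real code then retries via `goto repeat`). A simplified sketch of that allocate-then-recheck pattern (install_page() and @slot are illustrative, and the losing case here returns the winner's page instead of retrying):

	static struct page *install_page(spinlock_t *lock, struct page **slot,
					 struct page **pagepool, gfp_t gfp)
	{
		struct page *page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);

		spin_lock(lock);
		if (*slot) {
			/* lost the race: give ours back to the pool */
			erofs_pagepool_add(pagepool, page);
			page = *slot;
		} else {
			*slot = page;
		}
		spin_unlock(lock);
		return page;
	}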