/*
 * Number of PAGE_SIZE pages needed to hold this pcluster's compressed
 * data: pclustersize rounded up to a whole page boundary.
 */
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
}
122
/* i_mapping of the special managed-cache inode holding cached compressed data */
#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)

/* Return true if @fo belongs to the managed (compressed-data) page cache */
static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
{
	return fo->mapping == MNGD_MAPPING(sbi);
}
122
128
/*
123
129
* bit 30: I/O error occurred on this folio
124
130
* bit 0 - 29: remaining parts to complete this folio
@@ -611,37 +617,32 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
611
617
fe -> mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE ;
612
618
}
613
619
614
- /* called by erofs_shrinker to get rid of all compressed_pages */
615
- int erofs_try_to_free_all_cached_pages (struct erofs_sb_info * sbi ,
616
- struct erofs_workgroup * grp )
620
+ /* called by erofs_shrinker to get rid of all cached compressed bvecs */
621
+ int erofs_try_to_free_all_cached_folios (struct erofs_sb_info * sbi ,
622
+ struct erofs_workgroup * grp )
617
623
{
618
624
struct z_erofs_pcluster * const pcl =
619
625
container_of (grp , struct z_erofs_pcluster , obj );
620
626
unsigned int pclusterpages = z_erofs_pclusterpages (pcl );
621
627
int i ;
622
628
623
629
DBG_BUGON (z_erofs_is_inline_pcluster (pcl ));
624
- /*
625
- * refcount of workgroup is now freezed as 0,
626
- * therefore no need to worry about available decompression users.
627
- */
630
+ /* There is no actice user since the pcluster is now freezed */
628
631
for (i = 0 ; i < pclusterpages ; ++ i ) {
629
- struct page * page = pcl -> compressed_bvecs [i ].page ;
632
+ struct folio * folio = pcl -> compressed_bvecs [i ].folio ;
630
633
631
- if (!page )
634
+ if (!folio )
632
635
continue ;
633
636
634
- /* block other users from reclaiming or migrating the page */
635
- if (!trylock_page ( page ))
637
+ /* Avoid reclaiming or migrating this folio */
638
+ if (!folio_trylock ( folio ))
636
639
return - EBUSY ;
637
640
638
- if (!erofs_page_is_managed (sbi , page ))
641
+ if (!erofs_folio_is_managed (sbi , folio ))
639
642
continue ;
640
-
641
- /* barrier is implied in the following 'unlock_page' */
642
- WRITE_ONCE (pcl -> compressed_bvecs [i ].page , NULL );
643
- detach_page_private (page );
644
- unlock_page (page );
643
+ pcl -> compressed_bvecs [i ].folio = NULL ;
644
+ folio_detach_private (folio );
645
+ folio_unlock (folio );
645
646
}
646
647
return 0 ;
647
648
}
@@ -658,20 +659,17 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
658
659
659
660
ret = false;
660
661
spin_lock (& pcl -> obj .lockref .lock );
661
- if (pcl -> obj .lockref .count > 0 )
662
- goto out ;
663
-
664
- DBG_BUGON ( z_erofs_is_inline_pcluster ( pcl ));
665
- for ( i = 0 ; i < pclusterpages ; ++ i ) {
666
- if ( pcl -> compressed_bvecs [ i ]. page == & folio -> page ) {
667
- WRITE_ONCE ( pcl -> compressed_bvecs [ i ]. page , NULL ) ;
668
- ret = true ;
669
- break ;
662
+ if (pcl -> obj .lockref .count <= 0 ) {
663
+ DBG_BUGON ( z_erofs_is_inline_pcluster ( pcl )) ;
664
+ for ( i = 0 ; i < pclusterpages ; ++ i ) {
665
+ if ( pcl -> compressed_bvecs [ i ]. folio == folio ) {
666
+ pcl -> compressed_bvecs [ i ]. folio = NULL ;
667
+ folio_detach_private ( folio );
668
+ ret = true ;
669
+ break ;
670
+ }
670
671
}
671
672
}
672
- if (ret )
673
- folio_detach_private (folio );
674
- out :
675
673
spin_unlock (& pcl -> obj .lockref .lock );
676
674
return ret ;
677
675
}
@@ -1201,7 +1199,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1201
1199
be -> compressed_pages [i ] = page ;
1202
1200
1203
1201
if (z_erofs_is_inline_pcluster (pcl ) ||
1204
- erofs_page_is_managed (EROFS_SB (be -> sb ), page )) {
1202
+ erofs_folio_is_managed (EROFS_SB (be -> sb ), page_folio ( page ) )) {
1205
1203
if (!PageUptodate (page ))
1206
1204
err = - EIO ;
1207
1205
continue ;
@@ -1286,7 +1284,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1286
1284
/* consider shortlived pages added when decompressing */
1287
1285
page = be -> compressed_pages [i ];
1288
1286
1289
- if (!page || erofs_page_is_managed (sbi , page ))
1287
+ if (!page ||
1288
+ erofs_folio_is_managed (sbi , page_folio (page )))
1290
1289
continue ;
1291
1290
(void )z_erofs_put_shortlivedpage (be -> pagepool , page );
1292
1291
WRITE_ONCE (pcl -> compressed_bvecs [i ].page , NULL );
@@ -1573,7 +1572,7 @@ static void z_erofs_submissionqueue_endio(struct bio *bio)
1573
1572
1574
1573
DBG_BUGON (folio_test_uptodate (folio ));
1575
1574
DBG_BUGON (z_erofs_page_is_invalidated (& folio -> page ));
1576
- if (!erofs_page_is_managed (EROFS_SB (q -> sb ), & folio -> page ))
1575
+ if (!erofs_folio_is_managed (EROFS_SB (q -> sb ), folio ))
1577
1576
continue ;
1578
1577
1579
1578
if (!err )
0 commit comments