@@ -587,8 +587,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 }
 
 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
-int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
-                                        struct erofs_workgroup *grp)
+static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+                                               struct erofs_workgroup *grp)
 {
         struct z_erofs_pcluster *const pcl =
                 container_of(grp, struct z_erofs_pcluster, obj);
@@ -710,6 +710,23 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
         return ret;
 }
 
+static bool z_erofs_get_pcluster(struct erofs_workgroup *grp)
+{
+        if (lockref_get_not_zero(&grp->lockref))
+                return true;
+
+        spin_lock(&grp->lockref.lock);
+        if (__lockref_is_dead(&grp->lockref)) {
+                spin_unlock(&grp->lockref.lock);
+                return false;
+        }
+
+        if (!grp->lockref.count++)
+                atomic_long_dec(&erofs_global_shrink_cnt);
+        spin_unlock(&grp->lockref.lock);
+        return true;
+}
+
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
         struct erofs_map_blocks *map = &fe->map;
@@ -757,7 +774,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
         xa_lock(&sbi->managed_pslots);
         pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
                            NULL, grp, GFP_KERNEL);
-        if (!pre || xa_is_err(pre) || erofs_workgroup_get(pre)) {
+        if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
                 xa_unlock(&sbi->managed_pslots);
                 break;
         }
@@ -801,7 +818,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
         while (1) {
                 rcu_read_lock();
                 grp = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
-                if (!grp || erofs_workgroup_get(grp)) {
+                if (!grp || z_erofs_get_pcluster(grp)) {
                         DBG_BUGON(grp && blknr != grp->index);
                         rcu_read_unlock();
                         break;
@@ -869,14 +886,85 @@ static void z_erofs_rcu_callback(struct rcu_head *head)
                 struct z_erofs_pcluster, rcu));
 }
 
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
+static void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
 {
         struct z_erofs_pcluster *const pcl =
                 container_of(grp, struct z_erofs_pcluster, obj);
 
         call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 }
 
+static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+                                          struct erofs_workgroup *grp)
+{
+        int free = false;
+
+        spin_lock(&grp->lockref.lock);
+        if (grp->lockref.count)
+                goto out;
+
+        /*
+         * Note that all cached folios should be detached before deleted from
+         * the XArray. Otherwise some folios could be still attached to the
+         * orphan old pcluster when the new one is available in the tree.
+         */
+        if (erofs_try_to_free_all_cached_folios(sbi, grp))
+                goto out;
+
+        /*
+         * It's impossible to fail after the pcluster is freezed, but in order
+         * to avoid some race conditions, add a DBG_BUGON to observe this.
+         */
+        DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
+
+        lockref_mark_dead(&grp->lockref);
+        free = true;
+out:
+        spin_unlock(&grp->lockref.lock);
+        if (free) {
+                atomic_long_dec(&erofs_global_shrink_cnt);
+                erofs_workgroup_free_rcu(grp);
+        }
+        return free;
+}
+
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+                                  unsigned long nr_shrink)
+{
+        struct erofs_workgroup *grp;
+        unsigned int freed = 0;
+        unsigned long index;
+
+        xa_lock(&sbi->managed_pslots);
+        xa_for_each(&sbi->managed_pslots, index, grp) {
+                /* try to shrink each valid pcluster */
+                if (!erofs_try_to_release_pcluster(sbi, grp))
+                        continue;
+                xa_unlock(&sbi->managed_pslots);
+
+                ++freed;
+                if (!--nr_shrink)
+                        return freed;
+                xa_lock(&sbi->managed_pslots);
+        }
+        xa_unlock(&sbi->managed_pslots);
+        return freed;
+}
+
+static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
+{
+        struct erofs_workgroup *grp = &pcl->obj;
+
+        if (lockref_put_or_lock(&grp->lockref))
+                return;
+
+        DBG_BUGON(__lockref_is_dead(&grp->lockref));
+        if (grp->lockref.count == 1)
+                atomic_long_inc(&erofs_global_shrink_cnt);
+        --grp->lockref.count;
+        spin_unlock(&grp->lockref.lock);
+}
+
 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
 {
         struct z_erofs_pcluster *pcl = fe->pcl;
@@ -895,7 +983,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
          * any longer if the pcluster isn't hosted by ourselves.
          */
         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
-                erofs_workgroup_put(&pcl->obj);
+                z_erofs_put_pcluster(pcl);
 
         fe->pcl = NULL;
 }
@@ -1327,7 +1415,7 @@ static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                 if (z_erofs_is_inline_pcluster(be.pcl))
                         z_erofs_free_pcluster(be.pcl);
                 else
-                        erofs_workgroup_put(&be.pcl->obj);
+                        z_erofs_put_pcluster(be.pcl);
         }
         return err;
 }
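
For orientation, here is a minimal userspace sketch (not kernel code and not part of this diff) of the reference-count life cycle that z_erofs_get_pcluster(), z_erofs_put_pcluster() and erofs_try_to_release_pcluster() implement: a positive count pins the pcluster, a count of zero leaves it reclaimable and tracked by erofs_global_shrink_cnt, and a dead object can no longer be grabbed. A pthread mutex stands in for the lockref spinlock; all names below are illustrative assumptions.

/*
 * Illustrative stand-ins only: a pthread mutex replaces the lockref
 * spinlock, and shrink_cnt mimics erofs_global_shrink_cnt.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long shrink_cnt;                  /* objects reclaimable by the shrinker */

struct pcluster_ref {
        pthread_mutex_t lock;
        long count;                             /* active users */
        bool dead;                              /* detached by the shrinker */
};

/* like z_erofs_get_pcluster(): fails once the object has been detached */
static bool ref_get(struct pcluster_ref *r)
{
        bool ok = false;

        pthread_mutex_lock(&r->lock);
        if (!r->dead) {
                if (r->count++ == 0)            /* leaving the reclaimable state */
                        atomic_fetch_sub(&shrink_cnt, 1);
                ok = true;
        }
        pthread_mutex_unlock(&r->lock);
        return ok;
}

/* like z_erofs_put_pcluster(): the last user makes it reclaimable again */
static void ref_put(struct pcluster_ref *r)
{
        pthread_mutex_lock(&r->lock);
        if (r->count == 1)
                atomic_fetch_add(&shrink_cnt, 1);
        r->count--;
        pthread_mutex_unlock(&r->lock);
}

/* like erofs_try_to_release_pcluster(): only unused objects may be freed */
static bool ref_try_release(struct pcluster_ref *r)
{
        bool freed = false;

        pthread_mutex_lock(&r->lock);
        if (r->count == 0 && !r->dead) {
                r->dead = true;                 /* lockref_mark_dead() equivalent */
                atomic_fetch_sub(&shrink_cnt, 1);
                freed = true;
        }
        pthread_mutex_unlock(&r->lock);
        return freed;
}

int main(void)
{
        struct pcluster_ref r = { PTHREAD_MUTEX_INITIALIZER, 0, false };

        atomic_fetch_add(&shrink_cnt, 1);       /* starts out reclaimable */
        printf("get: %d\n", ref_get(&r));       /* 1: pinned, shrink_cnt drops */
        ref_put(&r);                            /* reclaimable again */
        printf("release: %d\n", ref_try_release(&r));    /* 1: detached */
        printf("get after release: %d\n", ref_get(&r));  /* 0: object is dead */
        return 0;
}

Compile with cc -pthread. The real helpers use struct lockref instead of a plain mutex, so the common get/put paths can avoid taking the spinlock at all.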