@@ -44,12 +44,15 @@ __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
  * A: Field should be accessed / updated in atomic for parallelized code.
  */
 struct z_erofs_pcluster {
-	struct erofs_workgroup obj;
 	struct mutex lock;
+	struct lockref lockref;
 
 	/* A: point to next chained pcluster or TAILs */
 	z_erofs_next_pcluster_t next;
 
+	/* I: start block address of this pcluster */
+	erofs_off_t index;
+
 	/* L: the maximum decompression size of this round */
 	unsigned int length;
 
@@ -108,7 +111,7 @@ struct z_erofs_decompressqueue {
 
 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 {
-	return !pcl->obj.index;
+	return !pcl->index;
 }
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
@@ -548,7 +551,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
 			continue;
 
-		page = find_get_page(mc, pcl->obj.index + i);
+		page = find_get_page(mc, pcl->index + i);
 		if (!page) {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
@@ -564,13 +567,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 				continue;
 			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
 		}
-		spin_lock(&pcl->obj.lockref.lock);
+		spin_lock(&pcl->lockref.lock);
 		if (!pcl->compressed_bvecs[i].page) {
 			pcl->compressed_bvecs[i].page = page ? page : newpage;
-			spin_unlock(&pcl->obj.lockref.lock);
+			spin_unlock(&pcl->lockref.lock);
 			continue;
 		}
-		spin_unlock(&pcl->obj.lockref.lock);
+		spin_unlock(&pcl->lockref.lock);
 
 		if (page)
 			put_page(page);
@@ -588,10 +591,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
 static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
-					       struct erofs_workgroup *grp)
+					       struct z_erofs_pcluster *pcl)
 {
-	struct z_erofs_pcluster *const pcl =
-		container_of(grp, struct z_erofs_pcluster, obj);
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	struct folio *folio;
 	int i;
@@ -626,8 +627,8 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		return true;
 
 	ret = false;
-	spin_lock(&pcl->obj.lockref.lock);
-	if (pcl->obj.lockref.count <= 0) {
+	spin_lock(&pcl->lockref.lock);
+	if (pcl->lockref.count <= 0) {
 		DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 		for (; bvec < end; ++bvec) {
 			if (bvec->page && page_folio(bvec->page) == folio) {
@@ -638,7 +639,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 			}
 		}
 	}
-	spin_unlock(&pcl->obj.lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	return ret;
 }
 
@@ -689,15 +690,15 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 
 	if (exclusive) {
 		/* give priority for inplaceio to use file pages first */
-		spin_lock(&pcl->obj.lockref.lock);
+		spin_lock(&pcl->lockref.lock);
 		while (fe->icur > 0) {
 			if (pcl->compressed_bvecs[--fe->icur].page)
 				continue;
 			pcl->compressed_bvecs[fe->icur] = *bvec;
-			spin_unlock(&pcl->obj.lockref.lock);
+			spin_unlock(&pcl->lockref.lock);
 			return 0;
 		}
-		spin_unlock(&pcl->obj.lockref.lock);
+		spin_unlock(&pcl->lockref.lock);
 
 		/* otherwise, check if it can be used as a bvpage */
 		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
@@ -710,20 +711,20 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 	return ret;
 }
 
-static bool z_erofs_get_pcluster(struct erofs_workgroup *grp)
+static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
 {
-	if (lockref_get_not_zero(&grp->lockref))
+	if (lockref_get_not_zero(&pcl->lockref))
 		return true;
 
-	spin_lock(&grp->lockref.lock);
-	if (__lockref_is_dead(&grp->lockref)) {
-		spin_unlock(&grp->lockref.lock);
+	spin_lock(&pcl->lockref.lock);
+	if (__lockref_is_dead(&pcl->lockref)) {
+		spin_unlock(&pcl->lockref.lock);
 		return false;
 	}
 
-	if (!grp->lockref.count++)
+	if (!pcl->lockref.count++)
 		atomic_long_dec(&erofs_global_shrink_cnt);
-	spin_unlock(&grp->lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	return true;
 }
 
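The hunk above only renames grp to pcl; the logic is the usual lockref pattern: try a lockless reference bump first, then fall back to taking the spinlock, refusing objects already marked dead and accounting for the 0 -> 1 transition. Below is a minimal userspace sketch of that slow path, assuming a plain pthread spinlock plus a signed counter in place of the kernel's struct lockref (the lockless lockref_get_not_zero() fast path is omitted); demo_pcluster, demo_get and demo_shrinkable are hypothetical names, not EROFS API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_pcluster {
	pthread_spinlock_t lock;
	int count;		/* < 0: dead, 0: cached only, > 0: in use */
};

static long demo_shrinkable = 1;	/* one cached object; stands in for erofs_global_shrink_cnt */

/* slow path of z_erofs_get_pcluster(), minus the lockless fast path */
static bool demo_get(struct demo_pcluster *pcl)
{
	bool ok = false;

	pthread_spin_lock(&pcl->lock);
	if (pcl->count >= 0) {		/* cf. the __lockref_is_dead() check */
		if (!pcl->count++)	/* 0 -> 1: no longer reclaimable */
			demo_shrinkable--;
		ok = true;
	}
	pthread_spin_unlock(&pcl->lock);
	return ok;
}

int main(void)
{
	struct demo_pcluster pcl = { .count = 0 };

	pthread_spin_init(&pcl.lock, PTHREAD_PROCESS_PRIVATE);
	printf("get: %s, count=%d, shrinkable=%ld\n",
	       demo_get(&pcl) ? "ok" : "dead", pcl.count, demo_shrinkable);
	pthread_spin_destroy(&pcl.lock);
	return 0;
}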
@@ -733,8 +734,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	struct super_block *sb = fe->inode->i_sb;
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
-	struct z_erofs_pcluster *pcl;
-	struct erofs_workgroup *grp, *pre;
+	struct z_erofs_pcluster *pcl, *pre;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
@@ -748,8 +748,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
 
-	spin_lock_init(&pcl->obj.lockref.lock);
-	pcl->obj.lockref.count = 1;	/* one ref for this request */
+	spin_lock_init(&pcl->lockref.lock);
+	pcl->lockref.count = 1;		/* one ref for this request */
 	pcl->algorithmformat = map->m_algorithmformat;
 	pcl->length = 0;
 	pcl->partial = true;
@@ -767,13 +767,13 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	DBG_BUGON(!mutex_trylock(&pcl->lock));
 
 	if (ztailpacking) {
-		pcl->obj.index = 0;	/* which indicates ztailpacking */
+		pcl->index = 0;		/* which indicates ztailpacking */
 	} else {
-		pcl->obj.index = erofs_blknr(sb, map->m_pa);
+		pcl->index = erofs_blknr(sb, map->m_pa);
 		while (1) {
 			xa_lock(&sbi->managed_pslots);
-			pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
-					   NULL, grp, GFP_KERNEL);
+			pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->index,
					   NULL, pcl, GFP_KERNEL);
 			if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
 				xa_unlock(&sbi->managed_pslots);
 				break;
@@ -786,8 +786,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 			err = xa_err(pre);
 			goto err_out;
 		} else if (pre) {
-			fe->pcl = container_of(pre,
-					struct z_erofs_pcluster, obj);
+			fe->pcl = pre;
 			err = -EEXIST;
 			goto err_out;
 		}
@@ -807,7 +806,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	struct erofs_map_blocks *map = &fe->map;
 	struct super_block *sb = fe->inode->i_sb;
 	erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
-	struct erofs_workgroup *grp = NULL;
+	struct z_erofs_pcluster *pcl = NULL;
 	int ret;
 
 	DBG_BUGON(fe->pcl);
@@ -817,9 +816,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	if (!(map->m_flags & EROFS_MAP_META)) {
 		while (1) {
 			rcu_read_lock();
-			grp = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
-			if (!grp || z_erofs_get_pcluster(grp)) {
-				DBG_BUGON(grp && blknr != grp->index);
+			pcl = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
+			if (!pcl || z_erofs_get_pcluster(pcl)) {
+				DBG_BUGON(pcl && blknr != pcl->index);
 				rcu_read_unlock();
 				break;
 			}
@@ -830,8 +829,8 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 		return -EFSCORRUPTED;
 	}
 
-	if (grp) {
-		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+	if (pcl) {
+		fe->pcl = pcl;
 		ret = -EEXIST;
 	} else {
 		ret = z_erofs_register_pcluster(fe);
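Taken together with the registration hunks earlier, the flow is: look the index up in managed_pslots, reuse an existing live pcluster if one is found, otherwise allocate and publish a new one, and fall back to the published winner if another thread raced ahead. Below is a small userspace sketch of that publish-or-reuse step, assuming a single C11 atomic pointer slot in place of the managed_pslots XArray and skipping the liveness re-check that z_erofs_get_pcluster() performs; demo_obj, demo_slot and demo_begin are hypothetical names, not EROFS API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_obj {
	long index;
	int refs;
};

static _Atomic(struct demo_obj *) demo_slot;	/* one pseudo XArray slot */

static struct demo_obj *demo_begin(long index)
{
	struct demo_obj *newobj, *expected = NULL;

	newobj = calloc(1, sizeof(*newobj));
	if (!newobj)
		return NULL;
	newobj->index = index;
	newobj->refs = 1;		/* one ref for this request */

	/* publish ours unless another one is already there (cf. __xa_cmpxchg) */
	if (atomic_compare_exchange_strong(&demo_slot, &expected, newobj))
		return newobj;

	free(newobj);			/* lost the race: reuse the winner */
	expected->refs++;		/* the real code re-checks liveness here */
	return expected;
}

int main(void)
{
	struct demo_obj *a = demo_begin(42);
	struct demo_obj *b = demo_begin(42);	/* takes the "already registered" path */

	printf("same object: %s, refs=%d\n", a == b ? "yes" : "no", b->refs);
	free(b);
	return 0;
}

Called twice with the same index, the second demo_begin() loses the compare-and-swap and simply takes a reference on the already-published object, which corresponds to the -EEXIST path above.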
@@ -886,59 +885,51 @@ static void z_erofs_rcu_callback(struct rcu_head *head)
 			struct z_erofs_pcluster, rcu));
 }
 
-static void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
-	struct z_erofs_pcluster *const pcl =
-		container_of(grp, struct z_erofs_pcluster, obj);
-
-	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
-}
-
 static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
-					  struct erofs_workgroup *grp)
+					  struct z_erofs_pcluster *pcl)
 {
 	int free = false;
 
-	spin_lock(&grp->lockref.lock);
-	if (grp->lockref.count)
+	spin_lock(&pcl->lockref.lock);
+	if (pcl->lockref.count)
 		goto out;
 
 	/*
 	 * Note that all cached folios should be detached before deleted from
 	 * the XArray. Otherwise some folios could be still attached to the
 	 * orphan old pcluster when the new one is available in the tree.
 	 */
-	if (erofs_try_to_free_all_cached_folios(sbi, grp))
+	if (erofs_try_to_free_all_cached_folios(sbi, pcl))
 		goto out;
 
 	/*
 	 * It's impossible to fail after the pcluster is freezed, but in order
 	 * to avoid some race conditions, add a DBG_BUGON to observe this.
 	 */
-	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
+	DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl);
 
-	lockref_mark_dead(&grp->lockref);
+	lockref_mark_dead(&pcl->lockref);
 	free = true;
 out:
-	spin_unlock(&grp->lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	if (free) {
 		atomic_long_dec(&erofs_global_shrink_cnt);
-		erofs_workgroup_free_rcu(grp);
+		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 	}
 	return free;
 }
 
 unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
 				  unsigned long nr_shrink)
 {
-	struct erofs_workgroup *grp;
+	struct z_erofs_pcluster *pcl;
 	unsigned int freed = 0;
 	unsigned long index;
 
 	xa_lock(&sbi->managed_pslots);
-	xa_for_each(&sbi->managed_pslots, index, grp) {
+	xa_for_each(&sbi->managed_pslots, index, pcl) {
 		/* try to shrink each valid pcluster */
-		if (!erofs_try_to_release_pcluster(sbi, grp))
+		if (!erofs_try_to_release_pcluster(sbi, pcl))
 			continue;
 		xa_unlock(&sbi->managed_pslots);
 
@@ -953,16 +944,14 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
 
 static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
 {
-	struct erofs_workgroup *grp = &pcl->obj;
-
-	if (lockref_put_or_lock(&grp->lockref))
+	if (lockref_put_or_lock(&pcl->lockref))
 		return;
 
-	DBG_BUGON(__lockref_is_dead(&grp->lockref));
-	if (grp->lockref.count == 1)
+	DBG_BUGON(__lockref_is_dead(&pcl->lockref));
+	if (pcl->lockref.count == 1)
 		atomic_long_inc(&erofs_global_shrink_cnt);
-	--grp->lockref.count;
-	spin_unlock(&grp->lockref.lock);
+	--pcl->lockref.count;
+	spin_unlock(&pcl->lockref.lock);
 }
 
 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
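As a companion to the earlier demo_get() sketch, the put side can be modelled the same way; again a hedged userspace analogue with hypothetical names, not EROFS code, and the lockref_put_or_lock() fast path is omitted. Dropping the last active reference leaves the count at 0 (cached, reclaimable) and bumps a global counter so a shrinker-style scan knows there is work to do.

#include <pthread.h>
#include <stdio.h>

struct demo_pcluster {
	pthread_spinlock_t lock;
	int count;		/* < 0: dead, 0: cached only, > 0: in use */
};

static long demo_shrinkable;	/* stands in for erofs_global_shrink_cnt */

/* slow path of z_erofs_put_pcluster(), minus lockref_put_or_lock() */
static void demo_put(struct demo_pcluster *pcl)
{
	pthread_spin_lock(&pcl->lock);
	if (pcl->count == 1)	/* dropping the last active reference */
		demo_shrinkable++;
	--pcl->count;
	pthread_spin_unlock(&pcl->lock);
}

int main(void)
{
	struct demo_pcluster pcl = { .count = 1 };	/* as left by registration */

	pthread_spin_init(&pcl.lock, PTHREAD_PROCESS_PRIVATE);
	demo_put(&pcl);
	printf("count=%d, shrinkable=%ld\n", pcl.count, demo_shrinkable);
	pthread_spin_destroy(&pcl.lock);
	return 0;
}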
@@ -1497,9 +1486,9 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	bvec->bv_offset = 0;
 	bvec->bv_len = PAGE_SIZE;
 repeat:
-	spin_lock(&pcl->obj.lockref.lock);
+	spin_lock(&pcl->lockref.lock);
 	zbv = pcl->compressed_bvecs[nr];
-	spin_unlock(&pcl->obj.lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	if (!zbv.page)
 		goto out_allocfolio;
 
@@ -1561,23 +1550,23 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	folio_put(folio);
 out_allocfolio:
 	page = __erofs_allocpage(&f->pagepool, gfp, true);
-	spin_lock(&pcl->obj.lockref.lock);
+	spin_lock(&pcl->lockref.lock);
 	if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
 		if (page)
 			erofs_pagepool_add(&f->pagepool, page);
-		spin_unlock(&pcl->obj.lockref.lock);
+		spin_unlock(&pcl->lockref.lock);
 		cond_resched();
 		goto repeat;
 	}
 	pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
-	spin_unlock(&pcl->obj.lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	bvec->bv_page = page;
 	if (!page)
 		return;
 	folio = page_folio(page);
 out_tocache:
 	if (!tocache || bs != PAGE_SIZE ||
-	    filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
+	    filemap_add_folio(mc, folio, pcl->index + nr, gfp)) {
 		/* turn into a temporary shortlived folio (1 ref) */
 		folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
 		return;
@@ -1709,7 +1698,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 
 		/* no device id here, thus it will always succeed */
 		mdev = (struct erofs_map_dev) {
-			.m_pa = erofs_pos(sb, pcl->obj.index),
+			.m_pa = erofs_pos(sb, pcl->index),
 		};
 		(void)erofs_map_dev(sb, &mdev);
 