Commit bf1aa03

erofs: sunset struct erofs_workgroup
`struct erofs_workgroup` was introduced to provide a unique header for
all physically indexed objects. However, after big pclusters and shared
pclusters were implemented upstream, all EROFS encoded data (which
requires transformation) can be represented with `struct
z_erofs_pcluster` directly. Move all of its members into `struct
z_erofs_pcluster` for simplicity.

Reviewed-by: Chao Yu <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 9c91f95 commit bf1aa03
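
To visualize, a minimal before/after sketch of the structures involved (field
lists abbreviated; see the diffs below for the real definitions). The two
`struct erofs_workgroup` members move into `struct z_erofs_pcluster`, so the
`container_of()` round-trips between the two types disappear:

	/* Before: every pcluster carried a separate header object. */
	struct erofs_workgroup {
		pgoff_t index;			/* block index in managed_pslots */
		struct lockref lockref;		/* spinlock + refcount */
	};

	struct z_erofs_pcluster {
		struct erofs_workgroup obj;	/* lookups returned &pcl->obj */
		struct mutex lock;
		/* ... */
	};

	/* After: the same two fields live in the pcluster itself (note that
	 * index also widens from pgoff_t to erofs_off_t). */
	struct z_erofs_pcluster {
		struct mutex lock;
		struct lockref lockref;
		z_erofs_next_pcluster_t next;
		erofs_off_t index;		/* 0 indicates ztailpacking */
		/* ... */
	};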

File tree

2 files changed: +60 −77 lines changed

fs/erofs/internal.h

Lines changed: 0 additions & 6 deletions

@@ -208,12 +208,6 @@ enum {
 	EROFS_ZIP_CACHE_READAROUND
 };
 
-/* basic unit of the workstation of a super_block */
-struct erofs_workgroup {
-	pgoff_t index;
-	struct lockref lockref;
-};
-
 enum erofs_kmap_type {
 	EROFS_NO_KMAP,	/* don't map the buffer */
 	EROFS_KMAP,	/* use kmap_local_page() to map the buffer */
fs/erofs/zdata.c

Lines changed: 60 additions & 71 deletions

@@ -44,12 +44,15 @@ __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
  * A: Field should be accessed / updated in atomic for parallelized code.
  */
 struct z_erofs_pcluster {
-	struct erofs_workgroup obj;
 	struct mutex lock;
+	struct lockref lockref;
 
 	/* A: point to next chained pcluster or TAILs */
 	z_erofs_next_pcluster_t next;
 
+	/* I: start block address of this pcluster */
+	erofs_off_t index;
+
 	/* L: the maximum decompression size of this round */
 	unsigned int length;
 
@@ -108,7 +111,7 @@ struct z_erofs_decompressqueue {
 
 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 {
-	return !pcl->obj.index;
+	return !pcl->index;
 }
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
@@ -548,7 +551,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
 			continue;
 
-		page = find_get_page(mc, pcl->obj.index + i);
+		page = find_get_page(mc, pcl->index + i);
 		if (!page) {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
@@ -564,13 +567,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 				continue;
 			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
 		}
-		spin_lock(&pcl->obj.lockref.lock);
+		spin_lock(&pcl->lockref.lock);
 		if (!pcl->compressed_bvecs[i].page) {
 			pcl->compressed_bvecs[i].page = page ? page : newpage;
-			spin_unlock(&pcl->obj.lockref.lock);
+			spin_unlock(&pcl->lockref.lock);
 			continue;
 		}
-		spin_unlock(&pcl->obj.lockref.lock);
+		spin_unlock(&pcl->lockref.lock);
 
 		if (page)
 			put_page(page);
@@ -588,10 +591,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
 static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
-					       struct erofs_workgroup *grp)
+					       struct z_erofs_pcluster *pcl)
 {
-	struct z_erofs_pcluster *const pcl =
-		container_of(grp, struct z_erofs_pcluster, obj);
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	struct folio *folio;
 	int i;
@@ -626,8 +627,8 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		return true;
 
 	ret = false;
-	spin_lock(&pcl->obj.lockref.lock);
-	if (pcl->obj.lockref.count <= 0) {
+	spin_lock(&pcl->lockref.lock);
+	if (pcl->lockref.count <= 0) {
 		DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 		for (; bvec < end; ++bvec) {
 			if (bvec->page && page_folio(bvec->page) == folio) {
@@ -638,7 +639,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 			}
 		}
 	}
-	spin_unlock(&pcl->obj.lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	return ret;
 }
 
@@ -689,15 +690,15 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 
 	if (exclusive) {
 		/* give priority for inplaceio to use file pages first */
-		spin_lock(&pcl->obj.lockref.lock);
+		spin_lock(&pcl->lockref.lock);
 		while (fe->icur > 0) {
 			if (pcl->compressed_bvecs[--fe->icur].page)
 				continue;
 			pcl->compressed_bvecs[fe->icur] = *bvec;
-			spin_unlock(&pcl->obj.lockref.lock);
+			spin_unlock(&pcl->lockref.lock);
 			return 0;
 		}
-		spin_unlock(&pcl->obj.lockref.lock);
+		spin_unlock(&pcl->lockref.lock);
 
 		/* otherwise, check if it can be used as a bvpage */
 		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
@@ -710,20 +711,20 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 	return ret;
 }
 
-static bool z_erofs_get_pcluster(struct erofs_workgroup *grp)
+static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
 {
-	if (lockref_get_not_zero(&grp->lockref))
+	if (lockref_get_not_zero(&pcl->lockref))
 		return true;
 
-	spin_lock(&grp->lockref.lock);
-	if (__lockref_is_dead(&grp->lockref)) {
-		spin_unlock(&grp->lockref.lock);
+	spin_lock(&pcl->lockref.lock);
+	if (__lockref_is_dead(&pcl->lockref)) {
+		spin_unlock(&pcl->lockref.lock);
 		return false;
 	}
 
-	if (!grp->lockref.count++)
+	if (!pcl->lockref.count++)
 		atomic_long_dec(&erofs_global_shrink_cnt);
-	spin_unlock(&grp->lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	return true;
 }
 
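The conversion of z_erofs_get_pcluster() above is mechanical; the lockref
logic itself is untouched. For readers new to it, a descriptive sketch of the
invariant the refcount maintains (a summary of behavior before and after this
patch, not code the patch adds):

	/*
	 * pcl->lockref.count > 0   in use; invisible to the shrinker
	 * pcl->lockref.count == 0  idle; counted in erofs_global_shrink_cnt
	 *                          and reclaimable by z_erofs_shrink_scan()
	 * __lockref_is_dead()      erased from managed_pslots, awaiting
	 *                          RCU free; lookups must fail
	 *
	 * Hence the 0 -> 1 transition here decrements
	 * erofs_global_shrink_cnt, and the 1 -> 0 transition in
	 * z_erofs_put_pcluster() (further down) increments it.
	 */
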
@@ -733,8 +734,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	struct super_block *sb = fe->inode->i_sb;
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
-	struct z_erofs_pcluster *pcl;
-	struct erofs_workgroup *grp, *pre;
+	struct z_erofs_pcluster *pcl, *pre;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
@@ -748,8 +748,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
 
-	spin_lock_init(&pcl->obj.lockref.lock);
-	pcl->obj.lockref.count = 1;	/* one ref for this request */
+	spin_lock_init(&pcl->lockref.lock);
+	pcl->lockref.count = 1;		/* one ref for this request */
 	pcl->algorithmformat = map->m_algorithmformat;
 	pcl->length = 0;
 	pcl->partial = true;
@@ -767,13 +767,13 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	DBG_BUGON(!mutex_trylock(&pcl->lock));
 
 	if (ztailpacking) {
-		pcl->obj.index = 0;	/* which indicates ztailpacking */
+		pcl->index = 0;		/* which indicates ztailpacking */
 	} else {
-		pcl->obj.index = erofs_blknr(sb, map->m_pa);
+		pcl->index = erofs_blknr(sb, map->m_pa);
 		while (1) {
 			xa_lock(&sbi->managed_pslots);
-			pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
-					   NULL, grp, GFP_KERNEL);
+			pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->index,
+					   NULL, pcl, GFP_KERNEL);
 			if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
 				xa_unlock(&sbi->managed_pslots);
 				break;
@@ -786,8 +786,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 			err = xa_err(pre);
 			goto err_out;
 		} else if (pre) {
-			fe->pcl = container_of(pre,
-					struct z_erofs_pcluster, obj);
+			fe->pcl = pre;
 			err = -EEXIST;
 			goto err_out;
 		}
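
With the workgroup gone, managed_pslots stores struct z_erofs_pcluster
pointers directly, which is why the container_of() on pre above collapses to
a plain assignment. The registration loop is a try-insert-else-reuse pattern;
a simplified sketch of its shape (identifiers as in the diff, surrounding
error handling elided):

	while (1) {
		xa_lock(&sbi->managed_pslots);
		/* try to install pcl at its block index (expects NULL) */
		pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->index,
				   NULL, pcl, GFP_KERNEL);
		if (!pre ||			/* won the race: inserted */
		    xa_is_err(pre) ||		/* XArray error (e.g. ENOMEM) */
		    z_erofs_get_pcluster(pre)) {/* lost: ref the live entry */
			xa_unlock(&sbi->managed_pslots);
			break;
		}
		/* the slot holds a dying pcluster: unlock and retry */
		xa_unlock(&sbi->managed_pslots);
	}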
@@ -807,7 +806,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	struct erofs_map_blocks *map = &fe->map;
 	struct super_block *sb = fe->inode->i_sb;
 	erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
-	struct erofs_workgroup *grp = NULL;
+	struct z_erofs_pcluster *pcl = NULL;
 	int ret;
 
 	DBG_BUGON(fe->pcl);
@@ -817,9 +816,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	if (!(map->m_flags & EROFS_MAP_META)) {
 		while (1) {
 			rcu_read_lock();
-			grp = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
-			if (!grp || z_erofs_get_pcluster(grp)) {
-				DBG_BUGON(grp && blknr != grp->index);
+			pcl = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
+			if (!pcl || z_erofs_get_pcluster(pcl)) {
+				DBG_BUGON(pcl && blknr != pcl->index);
 				rcu_read_unlock();
 				break;
 			}
@@ -830,8 +829,8 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 		return -EFSCORRUPTED;
 	}
 
-	if (grp) {
-		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+	if (pcl) {
+		fe->pcl = pcl;
 		ret = -EEXIST;
 	} else {
 		ret = z_erofs_register_pcluster(fe);
@@ -886,59 +885,51 @@ static void z_erofs_rcu_callback(struct rcu_head *head)
 			struct z_erofs_pcluster, rcu));
 }
 
-static void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
-	struct z_erofs_pcluster *const pcl =
-		container_of(grp, struct z_erofs_pcluster, obj);
-
-	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
-}
-
 static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
-					  struct erofs_workgroup *grp)
+					  struct z_erofs_pcluster *pcl)
 {
 	int free = false;
 
-	spin_lock(&grp->lockref.lock);
-	if (grp->lockref.count)
+	spin_lock(&pcl->lockref.lock);
+	if (pcl->lockref.count)
 		goto out;
 
 	/*
 	 * Note that all cached folios should be detached before deleted from
 	 * the XArray. Otherwise some folios could be still attached to the
 	 * orphan old pcluster when the new one is available in the tree.
	 */
-	if (erofs_try_to_free_all_cached_folios(sbi, grp))
+	if (erofs_try_to_free_all_cached_folios(sbi, pcl))
 		goto out;
 
 	/*
	 * It's impossible to fail after the pcluster is freezed, but in order
	 * to avoid some race conditions, add a DBG_BUGON to observe this.
	 */
-	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
+	DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl);
 
-	lockref_mark_dead(&grp->lockref);
+	lockref_mark_dead(&pcl->lockref);
 	free = true;
 out:
-	spin_unlock(&grp->lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	if (free) {
 		atomic_long_dec(&erofs_global_shrink_cnt);
-		erofs_workgroup_free_rcu(grp);
+		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 	}
 	return free;
 }
 
 unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
 				  unsigned long nr_shrink)
 {
-	struct erofs_workgroup *grp;
+	struct z_erofs_pcluster *pcl;
 	unsigned int freed = 0;
 	unsigned long index;
 
 	xa_lock(&sbi->managed_pslots);
-	xa_for_each(&sbi->managed_pslots, index, grp) {
+	xa_for_each(&sbi->managed_pslots, index, pcl) {
 		/* try to shrink each valid pcluster */
-		if (!erofs_try_to_release_pcluster(sbi, grp))
+		if (!erofs_try_to_release_pcluster(sbi, pcl))
 			continue;
 		xa_unlock(&sbi->managed_pslots);
 
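A note on the call_rcu() that replaces erofs_workgroup_free_rcu() above: with
both rcu and lockref embedded in the pcluster, the one-line wrapper no longer
pays for itself. The RCU deferral is still required for the same reason as
before (our reading of the surrounding code, not something this patch
changes):

	/*
	 * z_erofs_pcluster_begin() dereferences managed_pslots entries under
	 * rcu_read_lock() only, so after lockref_mark_dead() + __xa_erase() a
	 * concurrent lookup may still hold a stale pointer until the grace
	 * period ends; the dead check in z_erofs_get_pcluster() makes such a
	 * lookup fail safely instead of resurrecting the pcluster.
	 */
	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
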
@@ -953,16 +944,14 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
 
 static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
 {
-	struct erofs_workgroup *grp = &pcl->obj;
-
-	if (lockref_put_or_lock(&grp->lockref))
+	if (lockref_put_or_lock(&pcl->lockref))
 		return;
 
-	DBG_BUGON(__lockref_is_dead(&grp->lockref));
-	if (grp->lockref.count == 1)
+	DBG_BUGON(__lockref_is_dead(&pcl->lockref));
+	if (pcl->lockref.count == 1)
 		atomic_long_inc(&erofs_global_shrink_cnt);
-	--grp->lockref.count;
-	spin_unlock(&grp->lockref.lock);
+	--pcl->lockref.count;
+	spin_unlock(&pcl->lockref.lock);
 }
 
 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
@@ -1497,9 +1486,9 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	bvec->bv_offset = 0;
 	bvec->bv_len = PAGE_SIZE;
 repeat:
-	spin_lock(&pcl->obj.lockref.lock);
+	spin_lock(&pcl->lockref.lock);
 	zbv = pcl->compressed_bvecs[nr];
-	spin_unlock(&pcl->obj.lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	if (!zbv.page)
 		goto out_allocfolio;
 
@@ -1561,23 +1550,23 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	folio_put(folio);
 out_allocfolio:
 	page = __erofs_allocpage(&f->pagepool, gfp, true);
-	spin_lock(&pcl->obj.lockref.lock);
+	spin_lock(&pcl->lockref.lock);
 	if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
 		if (page)
 			erofs_pagepool_add(&f->pagepool, page);
-		spin_unlock(&pcl->obj.lockref.lock);
+		spin_unlock(&pcl->lockref.lock);
 		cond_resched();
 		goto repeat;
 	}
 	pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
-	spin_unlock(&pcl->obj.lockref.lock);
+	spin_unlock(&pcl->lockref.lock);
 	bvec->bv_page = page;
 	if (!page)
 		return;
 	folio = page_folio(page);
 out_tocache:
 	if (!tocache || bs != PAGE_SIZE ||
-	    filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
+	    filemap_add_folio(mc, folio, pcl->index + nr, gfp)) {
 		/* turn into a temporary shortlived folio (1 ref) */
 		folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
 		return;
@@ -1709,7 +1698,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 
 	/* no device id here, thus it will always succeed */
 	mdev = (struct erofs_map_dev) {
-		.m_pa = erofs_pos(sb, pcl->obj.index),
+		.m_pa = erofs_pos(sb, pcl->index),
 	};
 	(void)erofs_map_dev(sb, &mdev);
 
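The last two hunks show what pcl->index now means operationally: it is the
pcluster's start block number, from which both the device read position and
the managed-cache page indices derive. Roughly (helpers as used in the diff;
the shift-by-block-size-bits reading of erofs_pos() is our gloss):

	/* physical byte offset of the pcluster on the block device */
	mdev.m_pa = erofs_pos(sb, pcl->index);	/* ~ index << blkszbits */

	/* the i-th compressed page sits in the managed cache at index + i;
	 * folios are only added there when bs == PAGE_SIZE, per the check
	 * in z_erofs_fill_bio_vec() above */
	page = find_get_page(mc, pcl->index + i);
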