Commit 9c91f95

erofs: move erofs_workgroup operations into zdata.c
Move related helpers into zdata.c as an intermediate step of getting rid of `struct erofs_workgroup`, and rename:

    erofs_workgroup_put              => z_erofs_put_pcluster
    erofs_workgroup_get              => z_erofs_get_pcluster
    erofs_try_to_release_workgroup   => erofs_try_to_release_pcluster
    erofs_shrink_workstation         => z_erofs_shrink_scan

Reviewed-by: Chao Yu <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent b091e8e commit 9c91f95
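
The moved get/put helpers keep the existing lockref-based scheme: erofs_global_shrink_cnt counts pclusters whose reference count has dropped to zero (and which are therefore shrinkable), so a 0 -> 1 get decrements it and a 1 -> 0 put increments it. Below is a minimal standalone C sketch of that bookkeeping only; it is a userspace analogue, not the kernel code: struct pcluster, pcluster_get(), pcluster_put() and the pthread mutex standing in for struct lockref are invented for this illustration.

    /*
     * Simplified userspace analogue of the z_erofs_get_pcluster() /
     * z_erofs_put_pcluster() pattern moved into zdata.c by this commit.
     * A plain counter protected by a pthread mutex stands in for
     * struct lockref; global_shrink_cnt mirrors erofs_global_shrink_cnt
     * by counting objects that currently sit at a zero reference count.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_long global_shrink_cnt;   /* objects whose refcount is zero */

    struct pcluster {
            pthread_mutex_t lock;   /* stands in for struct lockref's spinlock */
            long count;
            bool dead;              /* set once the object is being torn down */
    };

    /* Take a reference; fails only if the object has already been killed. */
    static bool pcluster_get(struct pcluster *p)
    {
            bool ok = true;

            pthread_mutex_lock(&p->lock);
            if (p->dead)
                    ok = false;
            else if (!p->count++)   /* 0 -> 1: no longer a shrink candidate */
                    atomic_fetch_sub(&global_shrink_cnt, 1);
            pthread_mutex_unlock(&p->lock);
            return ok;
    }

    /* Drop a reference; a 1 -> 0 transition makes the object shrinkable again. */
    static void pcluster_put(struct pcluster *p)
    {
            pthread_mutex_lock(&p->lock);
            if (p->count == 1)
                    atomic_fetch_add(&global_shrink_cnt, 1);
            --p->count;
            pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
            struct pcluster p = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 0 };

            atomic_fetch_add(&global_shrink_cnt, 1);        /* zero refs: shrinkable */
            pcluster_get(&p);
            printf("shrinkable: %ld\n", atomic_load(&global_shrink_cnt));   /* 0 */
            pcluster_put(&p);
            printf("shrinkable: %ld\n", atomic_load(&global_shrink_cnt));   /* 1 */
            return 0;
    }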

File tree: 3 files changed (+105 / -112 lines)

fs/erofs/internal.h

Lines changed: 3 additions & 5 deletions
@@ -456,17 +456,15 @@ static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
 void erofs_release_pages(struct page **pagepool);
 
 #ifdef CONFIG_EROFS_FS_ZIP
-void erofs_workgroup_put(struct erofs_workgroup *grp);
-bool erofs_workgroup_get(struct erofs_workgroup *grp);
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+extern atomic_long_t erofs_global_shrink_cnt;
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
 int __init erofs_init_shrinker(void);
 void erofs_exit_shrinker(void);
 int __init z_erofs_init_subsystem(void);
 void z_erofs_exit_subsystem(void);
-int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
-                                        struct erofs_workgroup *egrp);
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+                                  unsigned long nr_shrink);
 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
                             int flags);
 void *z_erofs_get_gbuf(unsigned int requiredpages);

fs/erofs/zdata.c

Lines changed: 95 additions & 7 deletions
@@ -587,8 +587,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 }
 
 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
-int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
-                                        struct erofs_workgroup *grp)
+static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+                                               struct erofs_workgroup *grp)
 {
         struct z_erofs_pcluster *const pcl =
                 container_of(grp, struct z_erofs_pcluster, obj);

@@ -710,6 +710,23 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
         return ret;
 }
 
+static bool z_erofs_get_pcluster(struct erofs_workgroup *grp)
+{
+        if (lockref_get_not_zero(&grp->lockref))
+                return true;
+
+        spin_lock(&grp->lockref.lock);
+        if (__lockref_is_dead(&grp->lockref)) {
+                spin_unlock(&grp->lockref.lock);
+                return false;
+        }
+
+        if (!grp->lockref.count++)
+                atomic_long_dec(&erofs_global_shrink_cnt);
+        spin_unlock(&grp->lockref.lock);
+        return true;
+}
+
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
         struct erofs_map_blocks *map = &fe->map;

@@ -757,7 +774,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
                 xa_lock(&sbi->managed_pslots);
                 pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
                                    NULL, grp, GFP_KERNEL);
-                if (!pre || xa_is_err(pre) || erofs_workgroup_get(pre)) {
+                if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
                         xa_unlock(&sbi->managed_pslots);
                         break;
                 }

@@ -801,7 +818,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
         while (1) {
                 rcu_read_lock();
                 grp = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
-                if (!grp || erofs_workgroup_get(grp)) {
+                if (!grp || z_erofs_get_pcluster(grp)) {
                         DBG_BUGON(grp && blknr != grp->index);
                         rcu_read_unlock();
                         break;

@@ -869,14 +886,85 @@ static void z_erofs_rcu_callback(struct rcu_head *head)
                                struct z_erofs_pcluster, rcu));
 }
 
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
+static void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
 {
         struct z_erofs_pcluster *const pcl =
                 container_of(grp, struct z_erofs_pcluster, obj);
 
         call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 }
 
+static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+                                          struct erofs_workgroup *grp)
+{
+        int free = false;
+
+        spin_lock(&grp->lockref.lock);
+        if (grp->lockref.count)
+                goto out;
+
+        /*
+         * Note that all cached folios should be detached before deleted from
+         * the XArray. Otherwise some folios could be still attached to the
+         * orphan old pcluster when the new one is available in the tree.
+         */
+        if (erofs_try_to_free_all_cached_folios(sbi, grp))
+                goto out;
+
+        /*
+         * It's impossible to fail after the pcluster is freezed, but in order
+         * to avoid some race conditions, add a DBG_BUGON to observe this.
+         */
+        DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
+
+        lockref_mark_dead(&grp->lockref);
+        free = true;
+out:
+        spin_unlock(&grp->lockref.lock);
+        if (free) {
+                atomic_long_dec(&erofs_global_shrink_cnt);
+                erofs_workgroup_free_rcu(grp);
+        }
+        return free;
+}
+
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+                                  unsigned long nr_shrink)
+{
+        struct erofs_workgroup *grp;
+        unsigned int freed = 0;
+        unsigned long index;
+
+        xa_lock(&sbi->managed_pslots);
+        xa_for_each(&sbi->managed_pslots, index, grp) {
+                /* try to shrink each valid pcluster */
+                if (!erofs_try_to_release_pcluster(sbi, grp))
+                        continue;
+                xa_unlock(&sbi->managed_pslots);
+
+                ++freed;
+                if (!--nr_shrink)
+                        return freed;
+                xa_lock(&sbi->managed_pslots);
+        }
+        xa_unlock(&sbi->managed_pslots);
+        return freed;
+}
+
+static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
+{
+        struct erofs_workgroup *grp = &pcl->obj;
+
+        if (lockref_put_or_lock(&grp->lockref))
+                return;
+
+        DBG_BUGON(__lockref_is_dead(&grp->lockref));
+        if (grp->lockref.count == 1)
+                atomic_long_inc(&erofs_global_shrink_cnt);
+        --grp->lockref.count;
+        spin_unlock(&grp->lockref.lock);
+}
+
 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
 {
         struct z_erofs_pcluster *pcl = fe->pcl;

@@ -895,7 +983,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
          * any longer if the pcluster isn't hosted by ourselves.
          */
         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
-                erofs_workgroup_put(&pcl->obj);
+                z_erofs_put_pcluster(pcl);
 
         fe->pcl = NULL;
 }

@@ -1327,7 +1415,7 @@ static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                 if (z_erofs_is_inline_pcluster(be.pcl))
                         z_erofs_free_pcluster(be.pcl);
                 else
-                        erofs_workgroup_put(&be.pcl->obj);
+                        z_erofs_put_pcluster(be.pcl);
         }
         return err;
 }
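
Worth noting in z_erofs_shrink_scan() above: the walk holds the XArray lock while scanning, releases each candidate under that lock, then drops and re-takes the lock after every successful release, so the scan never pins managed_pslots for long and an exhausted quota can return without the lock held. A minimal userspace analogue of just that locking pattern follows; table, table_lock, try_release() and shrink_scan() are invented stand-ins (a plain array and a pthread mutex in place of the XArray), not erofs code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NR_SLOTS 8

    /* "table" plus its mutex stand in for managed_pslots and its xa_lock. */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static int table[NR_SLOTS] = { 1, 0, 3, 0, 5, 6, 0, 8 };   /* 0 == empty slot */

    /* Stand-in for erofs_try_to_release_pcluster(): pretend it always succeeds. */
    static bool try_release(size_t idx)
    {
            table[idx] = 0;
            return true;
    }

    static unsigned long shrink_scan(unsigned long nr_shrink)
    {
            unsigned long freed = 0;
            size_t i;

            pthread_mutex_lock(&table_lock);
            for (i = 0; i < NR_SLOTS; i++) {
                    /* skip empty slots and entries that refuse to be released */
                    if (!table[i] || !try_release(i))
                            continue;
                    /* drop the lock after each successful release so other
                     * users of the table are not blocked for the whole scan */
                    pthread_mutex_unlock(&table_lock);

                    ++freed;
                    if (!--nr_shrink)
                            return freed;   /* quota reached; lock already dropped */
                    pthread_mutex_lock(&table_lock);
            }
            pthread_mutex_unlock(&table_lock);
            return freed;
    }

    int main(void)
    {
            printf("freed %lu entries\n", shrink_scan(~0UL));   /* freed 5 entries */
            return 0;
    }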

fs/erofs/zutil.c

Lines changed: 7 additions & 100 deletions
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2024 Alibaba Cloud
  */
 #include "internal.h"
 

@@ -19,13 +20,12 @@ static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
 module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
 module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
 
-static atomic_long_t erofs_global_shrink_cnt;   /* for all mounted instances */
-/* protected by 'erofs_sb_list_lock' */
-static unsigned int shrinker_run_no;
+atomic_long_t erofs_global_shrink_cnt;          /* for all mounted instances */
 
-/* protects the mounted 'erofs_sb_list' */
+/* protects `erofs_sb_list_lock` and the mounted `erofs_sb_list` */
 static DEFINE_SPINLOCK(erofs_sb_list_lock);
 static LIST_HEAD(erofs_sb_list);
+static unsigned int shrinker_run_no;
 static struct shrinker *erofs_shrinker_info;
 
 static unsigned int z_erofs_gbuf_id(void)

@@ -214,97 +214,6 @@ void erofs_release_pages(struct page **pagepool)
         }
 }
 
-bool erofs_workgroup_get(struct erofs_workgroup *grp)
-{
-        if (lockref_get_not_zero(&grp->lockref))
-                return true;
-
-        spin_lock(&grp->lockref.lock);
-        if (__lockref_is_dead(&grp->lockref)) {
-                spin_unlock(&grp->lockref.lock);
-                return false;
-        }
-
-        if (!grp->lockref.count++)
-                atomic_long_dec(&erofs_global_shrink_cnt);
-        spin_unlock(&grp->lockref.lock);
-        return true;
-}
-
-static void __erofs_workgroup_free(struct erofs_workgroup *grp)
-{
-        atomic_long_dec(&erofs_global_shrink_cnt);
-        erofs_workgroup_free_rcu(grp);
-}
-
-void erofs_workgroup_put(struct erofs_workgroup *grp)
-{
-        if (lockref_put_or_lock(&grp->lockref))
-                return;
-
-        DBG_BUGON(__lockref_is_dead(&grp->lockref));
-        if (grp->lockref.count == 1)
-                atomic_long_inc(&erofs_global_shrink_cnt);
-        --grp->lockref.count;
-        spin_unlock(&grp->lockref.lock);
-}
-
-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
-                                           struct erofs_workgroup *grp)
-{
-        int free = false;
-
-        spin_lock(&grp->lockref.lock);
-        if (grp->lockref.count)
-                goto out;
-
-        /*
-         * Note that all cached pages should be detached before deleted from
-         * the XArray. Otherwise some cached pages could be still attached to
-         * the orphan old workgroup when the new one is available in the tree.
-         */
-        if (erofs_try_to_free_all_cached_folios(sbi, grp))
-                goto out;
-
-        /*
-         * It's impossible to fail after the workgroup is freezed,
-         * however in order to avoid some race conditions, add a
-         * DBG_BUGON to observe this in advance.
-         */
-        DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
-
-        lockref_mark_dead(&grp->lockref);
-        free = true;
-out:
-        spin_unlock(&grp->lockref.lock);
-        if (free)
-                __erofs_workgroup_free(grp);
-        return free;
-}
-
-static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
-                                              unsigned long nr_shrink)
-{
-        struct erofs_workgroup *grp;
-        unsigned int freed = 0;
-        unsigned long index;
-
-        xa_lock(&sbi->managed_pslots);
-        xa_for_each(&sbi->managed_pslots, index, grp) {
-                /* try to shrink each valid workgroup */
-                if (!erofs_try_to_release_workgroup(sbi, grp))
-                        continue;
-                xa_unlock(&sbi->managed_pslots);
-
-                ++freed;
-                if (!--nr_shrink)
-                        return freed;
-                xa_lock(&sbi->managed_pslots);
-        }
-        xa_unlock(&sbi->managed_pslots);
-        return freed;
-}
-
 void erofs_shrinker_register(struct super_block *sb)
 {
         struct erofs_sb_info *sbi = EROFS_SB(sb);

@@ -321,8 +230,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
         struct erofs_sb_info *const sbi = EROFS_SB(sb);
 
         mutex_lock(&sbi->umount_mutex);
-        /* clean up all remaining workgroups in memory */
-        erofs_shrink_workstation(sbi, ~0UL);
+        /* clean up all remaining pclusters in memory */
+        z_erofs_shrink_scan(sbi, ~0UL);
 
         spin_lock(&erofs_sb_list_lock);
         list_del(&sbi->list);

@@ -370,9 +279,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
 
                 spin_unlock(&erofs_sb_list_lock);
                 sbi->shrinker_run_no = run_no;
-
-                freed += erofs_shrink_workstation(sbi, nr - freed);
-
+                freed += z_erofs_shrink_scan(sbi, nr - freed);
                 spin_lock(&erofs_sb_list_lock);
                 /* Get the next list element before we move this one */
                 p = p->next;