  * https://www.huawei.com/
  */
 #include "internal.h"
-#include <linux/pagevec.h>
 
 struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
 {
@@ -33,22 +32,21 @@ void erofs_release_pages(struct page **pagepool)
 
 /* global shrink count (for all mounted EROFS instances) */
 static atomic_long_t erofs_global_shrink_cnt;
 
-static int erofs_workgroup_get(struct erofs_workgroup *grp)
+static bool erofs_workgroup_get(struct erofs_workgroup *grp)
 {
-        int o;
+        if (lockref_get_not_zero(&grp->lockref))
+                return true;
 
-repeat:
-        o = erofs_wait_on_workgroup_freezed(grp);
-        if (o <= 0)
-                return -1;
-
-        if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
-                goto repeat;
+        spin_lock(&grp->lockref.lock);
+        if (__lockref_is_dead(&grp->lockref)) {
+                spin_unlock(&grp->lockref.lock);
+                return false;
+        }
 
-        /* decrease refcount paired by erofs_workgroup_put */
-        if (o == 1)
+        if (!grp->lockref.count++)
                 atomic_long_dec(&erofs_global_shrink_cnt);
-        return 0;
+        spin_unlock(&grp->lockref.lock);
+        return true;
 }
 
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
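For reference, the pattern erofs_workgroup_get() now follows is the standard lockref fast/slow path: try a lockless increment first, then fall back to the embedded spinlock and re-check that the object has not already been marked dead. A minimal sketch, assuming the hypothetical names struct foo and foo_get() (they are not part of the patch):

#include <linux/lockref.h>
#include <linux/spinlock.h>

struct foo {
        struct lockref lockref;         /* spinlock + reference count */
};

static bool foo_get(struct foo *f)
{
        /* lockless fast path: succeeds while count > 0 and not dead */
        if (lockref_get_not_zero(&f->lockref))
                return true;

        /* slow path: take the embedded spinlock and re-check liveness */
        spin_lock(&f->lockref.lock);
        if (__lockref_is_dead(&f->lockref)) {
                spin_unlock(&f->lockref.lock);
                return false;           /* teardown already marked it dead */
        }
        f->lockref.count++;
        spin_unlock(&f->lockref.lock);
        return true;
}

In the EROFS version above, the 0 to 1 transition additionally decrements erofs_global_shrink_cnt: a workgroup whose count is zero is one the shrinker may still reclaim, so it leaves that pool as soon as a user appears.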
@@ -61,7 +59,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
         rcu_read_lock();
         grp = xa_load(&sbi->managed_pslots, index);
         if (grp) {
-                if (erofs_workgroup_get(grp)) {
+                if (!erofs_workgroup_get(grp)) {
                         /* prefer to relax rcu read side */
                         rcu_read_unlock();
                         goto repeat;
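The hunk above only flips the polarity of the return value check, but the surrounding lookup pattern is worth spelling out: an RCU-protected xa_load() followed by a conditional reference grab, retried when the in-tree object is concurrently being torn down. A sketch under the same hypothetical names, with foo_get() taken from the previous sketch:

#include <linux/rcupdate.h>
#include <linux/xarray.h>

static struct foo *foo_find(struct xarray *tree, unsigned long index)
{
        struct foo *f;

repeat:
        rcu_read_lock();
        f = xa_load(tree, index);
        /* foo_get() as defined in the previous sketch */
        if (f && !foo_get(f)) {
                /* raced with teardown: drop the RCU read lock and retry */
                rcu_read_unlock();
                goto repeat;
        }
        rcu_read_unlock();
        return f;       /* NULL, or an object with a reference held */
}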
@@ -80,11 +78,10 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
         struct erofs_workgroup *pre;
 
         /*
-         * Bump up a reference count before making this visible
-         * to others for the XArray in order to avoid potential
-         * UAF without serialized by xa_lock.
+         * Bump up before making this visible to others for the XArray in order
+         * to avoid potential UAF without serialized by xa_lock.
          */
-        atomic_inc(&grp->refcount);
+        lockref_get(&grp->lockref);
 
 repeat:
         xa_lock(&sbi->managed_pslots);
@@ -93,13 +90,13 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
         if (pre) {
                 if (xa_is_err(pre)) {
                         pre = ERR_PTR(xa_err(pre));
-                } else if (erofs_workgroup_get(pre)) {
+                } else if (!erofs_workgroup_get(pre)) {
                         /* try to legitimize the current in-tree one */
                         xa_unlock(&sbi->managed_pslots);
                         cond_resched();
                         goto repeat;
                 }
-                atomic_dec(&grp->refcount);
+                lockref_put_return(&grp->lockref);
                 grp = pre;
         }
         xa_unlock(&sbi->managed_pslots);
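The lockref_get()/lockref_put_return() pair in the two hunks above implements a "reference first, publish second" scheme: the new workgroup already holds an extra reference before it can be observed through the XArray, and that speculative reference is dropped again if an already-inserted workgroup wins the race. A sketch of the overall shape, reusing the hypothetical struct foo and foo_get() from the earlier sketches; the __xa_cmpxchg() call is only a stand-in, since the actual insertion call sits outside the quoted hunks, and the speculative reference is undone here with a plain locked decrement rather than lockref_put_return() so the sketch is unconditional:

#include <linux/err.h>
#include <linux/lockref.h>
#include <linux/sched.h>
#include <linux/xarray.h>

static struct foo *foo_insert(struct xarray *tree, unsigned long index,
                              struct foo *newf)
{
        struct foo *pre;

        /* hold a reference before newf can be found by concurrent lookups */
        lockref_get(&newf->lockref);
repeat:
        xa_lock(tree);
        /* stand-in publish step: install newf unless the slot is taken */
        pre = __xa_cmpxchg(tree, index, NULL, newf, GFP_NOWAIT);
        if (pre) {
                if (xa_is_err(pre)) {
                        pre = ERR_PTR(xa_err(pre));
                } else if (!foo_get(pre)) {
                        /* the in-tree object is dying: back off and retry */
                        xa_unlock(tree);
                        cond_resched();
                        goto repeat;
                }
                /* lost the race (or hit an error): undo the reference */
                spin_lock(&newf->lockref.lock);
                newf->lockref.count--;
                spin_unlock(&newf->lockref.lock);
                newf = pre;
        }
        xa_unlock(tree);
        return newf;    /* existing object, new object, or ERR_PTR() */
}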
@@ -112,38 +109,34 @@ static void __erofs_workgroup_free(struct erofs_workgroup *grp)
         erofs_workgroup_free_rcu(grp);
 }
 
-int erofs_workgroup_put(struct erofs_workgroup *grp)
+void erofs_workgroup_put(struct erofs_workgroup *grp)
 {
-        int count = atomic_dec_return(&grp->refcount);
+        if (lockref_put_or_lock(&grp->lockref))
+                return;
 
-        if (count == 1)
+        DBG_BUGON(__lockref_is_dead(&grp->lockref));
+        if (grp->lockref.count == 1)
                 atomic_long_inc(&erofs_global_shrink_cnt);
-        else if (!count)
-                __erofs_workgroup_free(grp);
-        return count;
+        --grp->lockref.count;
+        spin_unlock(&grp->lockref.lock);
 }
 
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
                                            struct erofs_workgroup *grp)
 {
-        /*
-         * If managed cache is on, refcount of workgroups
-         * themselves could be < 0 (freezed). In other words,
-         * there is no guarantee that all refcounts > 0.
-         */
-        if (!erofs_workgroup_try_to_freeze(grp, 1))
-                return false;
+        int free = false;
+
+        spin_lock(&grp->lockref.lock);
+        if (grp->lockref.count)
+                goto out;
 
         /*
-         * Note that all cached pages should be unattached
-         * before deleted from the XArray. Otherwise some
-         * cached pages could be still attached to the orphan
-         * old workgroup when the new one is available in the tree.
+         * Note that all cached pages should be detached before deleted from
+         * the XArray. Otherwise some cached pages could be still attached to
+         * the orphan old workgroup when the new one is available in the tree.
          */
-        if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
-                erofs_workgroup_unfreeze(grp, 1);
-                return false;
-        }
+        if (erofs_try_to_free_all_cached_pages(sbi, grp))
+                goto out;
 
         /*
          * It's impossible to fail after the workgroup is freezed,
@@ -152,10 +145,13 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
          */
         DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
 
-        /* last refcount should be connected with its managed pslot. */
-        erofs_workgroup_unfreeze(grp, 0);
-        __erofs_workgroup_free(grp);
-        return true;
+        lockref_mark_dead(&grp->lockref);
+        free = true;
+out:
+        spin_unlock(&grp->lockref.lock);
+        if (free)
+                __erofs_workgroup_free(grp);
+        return free;
 }
 
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
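Taken together, the put and release paths above use the lockref spinlock as the single serialization point for the final reference drop and for teardown: lockref_put_or_lock() completes any drop that leaves the count at one or more and only returns with the lock held when the count is about to reach zero, while lockref_mark_dead(), called with that same lock held, is what makes later erofs_workgroup_get() calls fail. A condensed sketch of the two sides, again with the hypothetical struct foo and a counter standing in for erofs_global_shrink_cnt; it deliberately leaves out the cached-page draining and the XArray erase shown above:

#include <linux/atomic.h>
#include <linux/lockref.h>
#include <linux/spinlock.h>

static atomic_long_t nr_idle_objects;  /* stand-in for erofs_global_shrink_cnt */

static void foo_put(struct foo *f)
{
        /* fast path: lockless decrement as long as the count stays >= 1 */
        if (lockref_put_or_lock(&f->lockref))
                return;

        /* slow path: the spinlock is held and the count is about to hit 0 */
        if (f->lockref.count == 1)
                atomic_long_inc(&nr_idle_objects);      /* now reclaimable */
        --f->lockref.count;
        spin_unlock(&f->lockref.lock);
}

/* shrinker side: only idle (zero-count) objects may be torn down */
static bool foo_try_to_release(struct foo *f)
{
        bool free = false;

        spin_lock(&f->lockref.lock);
        if (!f->lockref.count) {
                /* no users left: make every future foo_get() fail */
                lockref_mark_dead(&f->lockref);
                free = true;
        }
        spin_unlock(&f->lockref.lock);
        return free;    /* caller then frees the object, e.g. after RCU */
}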