@@ -68,8 +68,6 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 	rcu_read_lock();
 	grp = radix_tree_lookup(&sbi->workstn_tree, index);
 	if (grp) {
-		grp = xa_untag_pointer(grp);
-
 		if (erofs_workgroup_get(grp)) {
 			/* prefer to relax rcu read side */
 			rcu_read_unlock();
@@ -101,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
 	sbi = EROFS_SB(sb);
 	xa_lock(&sbi->workstn_tree);

-	grp = xa_tag_pointer(grp, 0);
-
 	/*
 	 * Bump up reference count before making this workgroup
 	 * visible to other users in order to avoid potential UAF
@@ -173,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
 	 */
-	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
-						     grp->index)) != grp);
+	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);

 	/*
 	 * If managed cache is on, last refcount should indicate
@@ -199,7 +194,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 				       batch, first_index, PAGEVEC_SIZE);

 	for (i = 0; i < found; ++i) {
-		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+		struct erofs_workgroup *grp = batch[i];

 		first_index = grp->index + 1;
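
Note for readers skimming the diff: the if (erofs_workgroup_get(grp)) check in the first hunk is what keeps the plain (untagged) pointer lookup safe. The lookup runs under rcu_read_lock() and can race with the final put of a workgroup, so the get has to fail on a dying object rather than bump its refcount back up; this is the same reasoning as the "avoid potential UAF" comment in erofs_register_workgroup. Below is a minimal userspace sketch of that try-get pattern, assuming C11 atomics; workgroup_try_get and its fields are a hypothetical illustration, not the actual EROFS helper.

/*
 * Userspace sketch (not kernel code): a hypothetical try-get in the spirit
 * of erofs_workgroup_get(). A lookup may race with the last put, so the
 * getter must refuse to revive a refcount that has already reached zero.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct workgroup {
	atomic_int refcount;	/* 0 means the object is already on its way out */
	unsigned long index;
};

/* Hypothetical helper: take a reference only while the object is still live. */
static bool workgroup_try_get(struct workgroup *grp)
{
	int old = atomic_load(&grp->refcount);

	/* cmpxchg loop: succeed only if the count never dropped to zero */
	while (old > 0) {
		if (atomic_compare_exchange_weak(&grp->refcount, &old, old + 1))
			return true;
	}
	return false;	/* lost the race with the final put; caller retries the lookup */
}

A caller would do the lookup, call workgroup_try_get(), and on failure drop back to the lookup loop, mirroring how erofs_find_workgroup() only dereferences a workgroup after the get succeeds.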