Skip to content

Commit 3893c20

Browse files
committed
Merge tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs
Pull erofs updates from Gao Xiang:

 "A regression fix, several cleanups and (maybe) plus an upcoming new
  mount api convert patch as a part of vfs update are considered
  available for this cycle.

  All commits have been in linux-next and tested with no smoke out.

  Summary:

   - fix an out-of-bound read access introduced in v5.3, which could
     rarely cause data corruption

   - various cleanup patches"

* tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: clean up z_erofs_submit_queue()
  erofs: fold in postsubmit_is_all_bypassed()
  erofs: fix out-of-bound read for shifted uncompressed block
  erofs: remove void tagging/untagging of workgroup pointers
  erofs: remove unused tag argument while registering a workgroup
  erofs: remove unused tag argument while finding a workgroup
  erofs: correct indentation of an assigned structure inside a function
2 parents 5307040 + 1e4a295 commit 3893c20

File tree

5 files changed

+74
-107
lines changed

5 files changed

+74
-107
lines changed

fs/erofs/decompressor.c

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
306306
}
307307

308308
src = kmap_atomic(*rq->in);
309-
if (!rq->out[0]) {
310-
dst = NULL;
311-
} else {
309+
if (rq->out[0]) {
312310
dst = kmap_atomic(rq->out[0]);
313311
memcpy(dst + rq->pageofs_out, src, righthalf);
312+
kunmap_atomic(dst);
314313
}
315314

316-
if (rq->out[1] == *rq->in) {
317-
memmove(src, src + righthalf, rq->pageofs_out);
318-
} else if (nrpages_out == 2) {
319-
if (dst)
320-
kunmap_atomic(dst);
315+
if (nrpages_out == 2) {
321316
DBG_BUGON(!rq->out[1]);
322-
dst = kmap_atomic(rq->out[1]);
323-
memcpy(dst, src + righthalf, rq->pageofs_out);
317+
if (rq->out[1] == *rq->in) {
318+
memmove(src, src + righthalf, rq->pageofs_out);
319+
} else {
320+
dst = kmap_atomic(rq->out[1]);
321+
memcpy(dst, src + righthalf, rq->pageofs_out);
322+
kunmap_atomic(dst);
323+
}
324324
}
325-
if (dst)
326-
kunmap_atomic(dst);
327325
kunmap_atomic(src);
328326
return 0;
329327
}

fs/erofs/internal.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
401401
#ifdef CONFIG_EROFS_FS_ZIP
402402
int erofs_workgroup_put(struct erofs_workgroup *grp);
403403
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
404-
pgoff_t index, bool *tag);
404+
pgoff_t index);
405405
int erofs_register_workgroup(struct super_block *sb,
406-
struct erofs_workgroup *grp, bool tag);
406+
struct erofs_workgroup *grp);
407407
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
408408
void erofs_shrinker_register(struct super_block *sb);
409409
void erofs_shrinker_unregister(struct super_block *sb);

fs/erofs/utils.c

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ static int erofs_workgroup_get(struct erofs_workgroup *grp)
5959
}
6060

6161
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
62-
pgoff_t index, bool *tag)
62+
pgoff_t index)
6363
{
6464
struct erofs_sb_info *sbi = EROFS_SB(sb);
6565
struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
6868
rcu_read_lock();
6969
grp = radix_tree_lookup(&sbi->workstn_tree, index);
7070
if (grp) {
71-
*tag = xa_pointer_tag(grp);
72-
grp = xa_untag_pointer(grp);
73-
7471
if (erofs_workgroup_get(grp)) {
7572
/* prefer to relax rcu read side */
7673
rcu_read_unlock();
@@ -84,8 +81,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
8481
}
8582

8683
int erofs_register_workgroup(struct super_block *sb,
87-
struct erofs_workgroup *grp,
88-
bool tag)
84+
struct erofs_workgroup *grp)
8985
{
9086
struct erofs_sb_info *sbi;
9187
int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
10399
sbi = EROFS_SB(sb);
104100
xa_lock(&sbi->workstn_tree);
105101

106-
grp = xa_tag_pointer(grp, tag);
107-
108102
/*
109103
* Bump up reference count before making this workgroup
110104
* visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
175169
* however in order to avoid some race conditions, add a
176170
* DBG_BUGON to observe this in advance.
177171
*/
178-
DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
179-
grp->index)) != grp);
172+
DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
180173

181174
/*
182175
* If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
201194
batch, first_index, PAGEVEC_SIZE);
202195

203196
for (i = 0; i < found; ++i) {
204-
struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
197+
struct erofs_workgroup *grp = batch[i];
205198

206199
first_index = grp->index + 1;
207200

fs/erofs/xattr.h

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
4646

4747
static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
4848
{
49-
static const struct xattr_handler *xattr_handler_map[] = {
50-
[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
49+
static const struct xattr_handler *xattr_handler_map[] = {
50+
[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
5151
#ifdef CONFIG_EROFS_FS_POSIX_ACL
52-
[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
53-
[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
54-
&posix_acl_default_xattr_handler,
52+
[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
53+
&posix_acl_access_xattr_handler,
54+
[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
55+
&posix_acl_default_xattr_handler,
5556
#endif
56-
[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
57+
[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
5758
#ifdef CONFIG_EROFS_FS_SECURITY
58-
[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
59+
[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
5960
#endif
60-
};
61+
};
6162

6263
return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
6364
xattr_handler_map[idx] : NULL;

fs/erofs/zdata.c

Lines changed: 49 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
345345
struct z_erofs_pcluster *pcl;
346346
struct z_erofs_collection *cl;
347347
unsigned int length;
348-
bool tag;
349348

350-
grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
349+
grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
351350
if (!grp)
352351
return -ENOENT;
353352

@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
438437
*/
439438
mutex_trylock(&cl->lock);
440439

441-
err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
440+
err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
442441
if (err) {
443442
mutex_unlock(&cl->lock);
444443
kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
11491148
qtail[JQ_BYPASS] = &pcl->next;
11501149
}
11511150

1152-
static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
1153-
unsigned int nr_bios, bool force_fg)
1154-
{
1155-
/*
1156-
* although background is preferred, no one is pending for submission.
1157-
* don't issue workqueue for decompression but drop it directly instead.
1158-
*/
1159-
if (force_fg || nr_bios)
1160-
return false;
1161-
1162-
kvfree(q[JQ_SUBMIT]);
1163-
return true;
1164-
}
1165-
1166-
static bool z_erofs_submit_queue(struct super_block *sb,
1151+
static void z_erofs_submit_queue(struct super_block *sb,
11671152
z_erofs_next_pcluster_t owned_head,
11681153
struct list_head *pagepool,
11691154
struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
11721157
struct erofs_sb_info *const sbi = EROFS_SB(sb);
11731158
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
11741159
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1175-
struct bio *bio;
11761160
void *bi_private;
11771161
/* since bio will be NULL, no need to initialize last_index */
11781162
pgoff_t uninitialized_var(last_index);
1179-
bool force_submit = false;
1180-
unsigned int nr_bios;
1181-
1182-
if (owned_head == Z_EROFS_PCLUSTER_TAIL)
1183-
return false;
1163+
unsigned int nr_bios = 0;
1164+
struct bio *bio = NULL;
11841165

1185-
force_submit = false;
1186-
bio = NULL;
1187-
nr_bios = 0;
11881166
bi_private = jobqueueset_init(sb, q, fgq, force_fg);
11891167
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
11901168
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,67 +1172,60 @@ static bool z_erofs_submit_queue(struct super_block *sb,
11941172

11951173
do {
11961174
struct z_erofs_pcluster *pcl;
1197-
unsigned int clusterpages;
1198-
pgoff_t first_index;
1199-
struct page *page;
1200-
unsigned int i = 0, bypass = 0;
1201-
int err;
1175+
pgoff_t cur, end;
1176+
unsigned int i = 0;
1177+
bool bypass = true;
12021178

12031179
/* no possible 'owned_head' equals the following */
12041180
DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
12051181
DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
12061182

12071183
pcl = container_of(owned_head, struct z_erofs_pcluster, next);
12081184

1209-
clusterpages = BIT(pcl->clusterbits);
1185+
cur = pcl->obj.index;
1186+
end = cur + BIT(pcl->clusterbits);
12101187

12111188
/* close the main owned chain at first */
12121189
owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
12131190
Z_EROFS_PCLUSTER_TAIL_CLOSED);
12141191

1215-
first_index = pcl->obj.index;
1216-
force_submit |= (first_index != last_index + 1);
1192+
do {
1193+
struct page *page;
1194+
int err;
12171195

1218-
repeat:
1219-
page = pickup_page_for_submission(pcl, i, pagepool,
1220-
MNGD_MAPPING(sbi),
1221-
GFP_NOFS);
1222-
if (!page) {
1223-
force_submit = true;
1224-
++bypass;
1225-
goto skippage;
1226-
}
1196+
page = pickup_page_for_submission(pcl, i++, pagepool,
1197+
MNGD_MAPPING(sbi),
1198+
GFP_NOFS);
1199+
if (!page)
1200+
continue;
12271201

1228-
if (bio && force_submit) {
1202+
if (bio && cur != last_index + 1) {
12291203
submit_bio_retry:
1230-
submit_bio(bio);
1231-
bio = NULL;
1232-
}
1233-
1234-
if (!bio) {
1235-
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
1204+
submit_bio(bio);
1205+
bio = NULL;
1206+
}
12361207

1237-
bio->bi_end_io = z_erofs_decompressqueue_endio;
1238-
bio_set_dev(bio, sb->s_bdev);
1239-
bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
1240-
LOG_SECTORS_PER_BLOCK;
1241-
bio->bi_private = bi_private;
1242-
bio->bi_opf = REQ_OP_READ;
1208+
if (!bio) {
1209+
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
12431210

1244-
++nr_bios;
1245-
}
1211+
bio->bi_end_io = z_erofs_decompressqueue_endio;
1212+
bio_set_dev(bio, sb->s_bdev);
1213+
bio->bi_iter.bi_sector = (sector_t)cur <<
1214+
LOG_SECTORS_PER_BLOCK;
1215+
bio->bi_private = bi_private;
1216+
bio->bi_opf = REQ_OP_READ;
1217+
++nr_bios;
1218+
}
12461219

1247-
err = bio_add_page(bio, page, PAGE_SIZE, 0);
1248-
if (err < PAGE_SIZE)
1249-
goto submit_bio_retry;
1220+
err = bio_add_page(bio, page, PAGE_SIZE, 0);
1221+
if (err < PAGE_SIZE)
1222+
goto submit_bio_retry;
12501223

1251-
force_submit = false;
1252-
last_index = first_index + i;
1253-
skippage:
1254-
if (++i < clusterpages)
1255-
goto repeat;
1224+
last_index = cur;
1225+
bypass = false;
1226+
} while (++cur < end);
12561227

1257-
if (bypass < clusterpages)
1228+
if (!bypass)
12581229
qtail[JQ_SUBMIT] = &pcl->next;
12591230
else
12601231
move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1263,11 +1234,15 @@ static bool z_erofs_submit_queue(struct super_block *sb,
12631234
if (bio)
12641235
submit_bio(bio);
12651236

1266-
if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
1267-
return true;
1268-
1237+
/*
1238+
* although background is preferred, no one is pending for submission.
1239+
* don't issue workqueue for decompression but drop it directly instead.
1240+
*/
1241+
if (!*force_fg && !nr_bios) {
1242+
kvfree(q[JQ_SUBMIT]);
1243+
return;
1244+
}
12691245
z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
1270-
return true;
12711246
}
12721247

12731248
static void z_erofs_runqueue(struct super_block *sb,
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
12761251
{
12771252
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
12781253

1279-
if (!z_erofs_submit_queue(sb, clt->owned_head,
1280-
pagepool, io, &force_fg))
1254+
if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
12811255
return;
1256+
z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
12821257

12831258
/* handle bypass queue (no i/o pclusters) immediately */
12841259
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

0 commit comments

Comments (0)