
Commit e31b283

Merge tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs
Pull jffs2, ubi and ubifs updates from Richard Weinberger:

 "JFFS2:
   - Fix memory corruption in error path
   - Spelling and coding style fixes

  UBI:
   - Switch to BLK_MQ_F_BLOCKING in ubiblock
   - Wire up parent device (for sysfs)
   - Multiple UAF bugfixes
   - Fix for an infinite loop in WL error path

  UBIFS:
   - Fix for multiple memory leaks in error paths
   - Fixes for wrong space accounting
   - Minor cleanups
   - Spelling and coding style fixes"

* tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs: (36 commits)
  ubi: block: Fix a possible use-after-free bug in ubiblock_create()
  ubifs: make kobj_type structures constant
  mtd: ubi: block: wire-up device parent
  mtd: ubi: wire-up parent MTD device
  ubi: use correct names in function kernel-doc comments
  ubi: block: set BLK_MQ_F_BLOCKING
  jffs2: Fix list_del corruption if compressors initialized failed
  jffs2: Use function instead of macro when initialize compressors
  jffs2: fix spelling mistake "neccecary"->"necessary"
  ubifs: Fix kernel-doc
  ubifs: Fix some kernel-doc comments
  UBI: Fastmap: Fix kernel-doc
  ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
  ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
  ubi: fastmap: Fix missed fm_anchor PEB in wear-leveling after disabling fastmap
  ubifs: ubifs_releasepage: Remove ubifs_assert(0) to valid this process
  ubifs: ubifs_writepage: Mark page dirty after writing inode failed
  ubifs: dirty_cow_znode: Fix memleak in error handling path
  ubifs: Re-statistic cleaned znode count if commit failed
  ubi: Fix permission display of the debugfs files
  ...
2 parents 3808330 + 8fcf2d0 commit e31b283

File tree: 24 files changed (+274, -168 lines)


drivers/mtd/ubi/block.c

Lines changed: 36 additions & 73 deletions
@@ -35,7 +35,6 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/mtd/ubi.h>
-#include <linux/workqueue.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
 #include <linux/hdreg.h>
@@ -62,7 +61,6 @@ struct ubiblock_param {
 };
 
 struct ubiblock_pdu {
-        struct work_struct work;
         struct ubi_sgl usgl;
 };
 
@@ -82,8 +80,6 @@ struct ubiblock {
         struct gendisk *gd;
         struct request_queue *rq;
 
-        struct workqueue_struct *wq;
-
         struct mutex dev_mutex;
         struct list_head list;
         struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
         return NULL;
 }
 
-static int ubiblock_read(struct ubiblock_pdu *pdu)
+static blk_status_t ubiblock_read(struct request *req)
 {
-        int ret, leb, offset, bytes_left, to_read;
-        u64 pos;
-        struct request *req = blk_mq_rq_from_pdu(pdu);
+        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
         struct ubiblock *dev = req->q->queuedata;
+        u64 pos = blk_rq_pos(req) << 9;
+        int to_read = blk_rq_bytes(req);
+        int bytes_left = to_read;
+        /* Get LEB:offset address to read from */
+        int offset = do_div(pos, dev->leb_size);
+        int leb = pos;
+        struct req_iterator iter;
+        struct bio_vec bvec;
+        int ret;
 
-        to_read = blk_rq_bytes(req);
-        pos = blk_rq_pos(req) << 9;
+        blk_mq_start_request(req);
 
-        /* Get LEB:offset address to read from */
-        offset = do_div(pos, dev->leb_size);
-        leb = pos;
-        bytes_left = to_read;
+        /*
+         * It is safe to ignore the return value of blk_rq_map_sg() because
+         * the number of sg entries is limited to UBI_MAX_SG_COUNT
+         * and ubi_read_sg() will check that limit.
+         */
+        ubi_sgl_init(&pdu->usgl);
+        blk_rq_map_sg(req->q, req, pdu->usgl.sg);
 
         while (bytes_left) {
                 /*
@@ -206,14 +211,17 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
 
                 ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
                 if (ret < 0)
-                        return ret;
+                        break;
 
                 bytes_left -= to_read;
                 to_read = bytes_left;
                 leb += 1;
                 offset = 0;
         }
-        return 0;
+
+        rq_for_each_segment(bvec, req, iter)
+                flush_dcache_page(bvec.bv_page);
+        return errno_to_blk_status(ret);
 }
 
 static int ubiblock_open(struct block_device *bdev, fmode_t mode)
@@ -289,47 +297,15 @@ static const struct block_device_operations ubiblock_ops = {
         .getgeo = ubiblock_getgeo,
 };
 
-static void ubiblock_do_work(struct work_struct *work)
-{
-        int ret;
-        struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
-        struct request *req = blk_mq_rq_from_pdu(pdu);
-        struct req_iterator iter;
-        struct bio_vec bvec;
-
-        blk_mq_start_request(req);
-
-        /*
-         * It is safe to ignore the return value of blk_rq_map_sg() because
-         * the number of sg entries is limited to UBI_MAX_SG_COUNT
-         * and ubi_read_sg() will check that limit.
-         */
-        blk_rq_map_sg(req->q, req, pdu->usgl.sg);
-
-        ret = ubiblock_read(pdu);
-
-        rq_for_each_segment(bvec, req, iter)
-                flush_dcache_page(bvec.bv_page);
-
-        blk_mq_end_request(req, errno_to_blk_status(ret));
-}
-
 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                              const struct blk_mq_queue_data *bd)
 {
-        struct request *req = bd->rq;
-        struct ubiblock *dev = hctx->queue->queuedata;
-        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
-
-        switch (req_op(req)) {
+        switch (req_op(bd->rq)) {
         case REQ_OP_READ:
-                ubi_sgl_init(&pdu->usgl);
-                queue_work(dev->wq, &pdu->work);
-                return BLK_STS_OK;
+                return ubiblock_read(bd->rq);
         default:
                 return BLK_STS_IOERR;
         }
-
 }
 
 static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -339,8 +315,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
         struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 
         sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
-        INIT_WORK(&pdu->work, ubiblock_do_work);
-
         return 0;
 }
 
@@ -354,9 +328,12 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
         u64 size = vi->used_bytes >> 9;
 
         if (vi->used_bytes % 512) {
-                pr_warn("UBI: block: volume size is not a multiple of 512, "
-                        "last %llu bytes are ignored!\n",
-                        vi->used_bytes - (size << 9));
+                if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+                        pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+                                vi->used_bytes - (size << 9));
+                else
+                        pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+                                vi->used_bytes - (size << 9));
         }
 
         if ((sector_t)size != size)
@@ -401,7 +378,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
         dev->tag_set.ops = &ubiblock_mq_ops;
         dev->tag_set.queue_depth = 64;
         dev->tag_set.numa_node = NUMA_NO_NODE;
-        dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+        dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
         dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
         dev->tag_set.driver_data = dev;
         dev->tag_set.nr_hw_queues = 1;
@@ -439,32 +416,20 @@ int ubiblock_create(struct ubi_volume_info *vi)
         dev->rq = gd->queue;
         blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
 
-        /*
-         * Create one workqueue per volume (per registered block device).
-         * Remember workqueues are cheap, they're not threads.
-         */
-        dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
-        if (!dev->wq) {
-                ret = -ENOMEM;
-                goto out_remove_minor;
-        }
-
         list_add_tail(&dev->list, &ubiblock_devices);
 
         /* Must be the last step: anyone can call file ops from now on */
-        ret = add_disk(dev->gd);
+        ret = device_add_disk(vi->dev, dev->gd, NULL);
         if (ret)
-                goto out_destroy_wq;
+                goto out_remove_minor;
 
         dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
                  dev->ubi_num, dev->vol_id, vi->name);
         mutex_unlock(&devices_mutex);
         return 0;
 
-out_destroy_wq:
-        list_del(&dev->list);
-        destroy_workqueue(dev->wq);
 out_remove_minor:
+        list_del(&dev->list);
         idr_remove(&ubiblock_minor_idr, gd->first_minor);
 out_cleanup_disk:
         put_disk(dev->gd);
@@ -482,8 +447,6 @@ static void ubiblock_cleanup(struct ubiblock *dev)
 {
         /* Stop new requests to arrive */
         del_gendisk(dev->gd);
-        /* Flush pending work */
-        destroy_workqueue(dev->wq);
         /* Finally destroy the blk queue */
         dev_info(disk_to_dev(dev->gd), "released");
         put_disk(dev->gd);
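Taken as a whole, the block.c diff removes the per-volume workqueue: reads are now issued synchronously from ->queue_rq(), which is only permitted because the tag set carries BLK_MQ_F_BLOCKING, so blk-mq invokes ->queue_rq() from a context that may sleep. Below is a minimal, hypothetical sketch of that pattern, just to show where the flag and the inline completion fit; the demo_* names are made up and this is not the UBI driver's actual code.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Stand-in for a driver's real transfer routine; it may sleep. */
static int demo_do_blocking_io(struct request *req)
{
        return 0;       /* pretend the I/O succeeded */
}

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;

        blk_mq_start_request(req);
        /* Sleeping here is legal because the tag set has BLK_MQ_F_BLOCKING. */
        blk_mq_end_request(req, errno_to_blk_status(demo_do_blocking_io(req)));
        return BLK_STS_OK;
}

static const struct blk_mq_ops demo_mq_ops = {
        .queue_rq = demo_queue_rq,
};

/*
 * At device setup time the tag set would be marked blocking, e.g.:
 *      tag_set->ops   = &demo_mq_ops;
 *      tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
 */

One visible consequence in ubiblock_create()/ubiblock_cleanup() above is that there is no longer a workqueue to allocate, flush, or destroy.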

drivers/mtd/ubi/build.c

Lines changed: 28 additions & 4 deletions
@@ -35,7 +35,7 @@
 #define MTD_PARAM_LEN_MAX 64
 
 /* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 4
+#define MTD_PARAM_MAX_COUNT 5
 
 /* Maximum value for the number of bad PEBs per 1024 PEBs */
 #define MAX_MTD_UBI_BEB_LIMIT 768
@@ -53,12 +53,14 @@
  * @ubi_num: UBI number
  * @vid_hdr_offs: VID header offset
  * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ * @enable_fm: enable fastmap when value is non-zero
  */
 struct mtd_dev_param {
         char name[MTD_PARAM_LEN_MAX];
         int ubi_num;
         int vid_hdr_offs;
         int max_beb_per1024;
+        int enable_fm;
 };
 
 /* Numbers of elements set in the @mtd_dev_param array */
@@ -468,6 +470,7 @@ static int uif_init(struct ubi_device *ubi)
                 err = ubi_add_volume(ubi, ubi->volumes[i]);
                 if (err) {
                         ubi_err(ubi, "cannot add volume %d", i);
+                        ubi->volumes[i] = NULL;
                         goto out_volumes;
                 }
         }
@@ -663,6 +666,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
         ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
         ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
+        if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
+            ubi->vid_hdr_alsize)) {
+                ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
+                return -EINVAL;
+        }
+
         dbg_gen("min_io_size %d", ubi->min_io_size);
         dbg_gen("max_write_size %d", ubi->max_write_size);
         dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -906,6 +915,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
         ubi->dev.release = dev_release;
         ubi->dev.class = &ubi_class;
         ubi->dev.groups = ubi_dev_groups;
+        ubi->dev.parent = &mtd->dev;
 
         ubi->mtd = mtd;
         ubi->ubi_num = ubi_num;
@@ -1248,7 +1258,7 @@ static int __init ubi_init(void)
                 mutex_lock(&ubi_devices_mutex);
                 err = ubi_attach_mtd_dev(mtd, p->ubi_num,
                                          p->vid_hdr_offs, p->max_beb_per1024,
-                                         false);
+                                         p->enable_fm == 0 ? true : false);
                 mutex_unlock(&ubi_devices_mutex);
                 if (err < 0) {
                         pr_err("UBI error: cannot attach mtd%d\n",
@@ -1427,7 +1437,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
                 int err = kstrtoint(token, 10, &p->max_beb_per1024);
 
                 if (err) {
-                        pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+                        pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
                                token);
                         return -EINVAL;
                 }
@@ -1438,13 +1448,25 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
                 int err = kstrtoint(token, 10, &p->ubi_num);
 
                 if (err) {
-                        pr_err("UBI error: bad value for ubi_num parameter: %s",
+                        pr_err("UBI error: bad value for ubi_num parameter: %s\n",
                                token);
                         return -EINVAL;
                 }
         } else
                 p->ubi_num = UBI_DEV_NUM_AUTO;
 
+        token = tokens[4];
+        if (token) {
+                int err = kstrtoint(token, 10, &p->enable_fm);
+
+                if (err) {
+                        pr_err("UBI error: bad value for enable_fm parameter: %s\n",
+                               token);
+                        return -EINVAL;
+                }
+        } else
+                p->enable_fm = 0;
+
         mtd_devs += 1;
         return 0;
 }
@@ -1457,11 +1479,13 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
               "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
               __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
               "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
+              "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
               "\n"
               "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
               "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
               "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
               "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
+              "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
               "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
 #ifdef CONFIG_MTD_UBI_FASTMAP
 module_param(fm_autoconvert, bool, 0644);
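Net effect of the build.c changes: the mtd= attach parameter gains an optional fifth field (hence MTD_PARAM_MAX_COUNT going from 4 to 5) that decides whether fastmap is used for the attach, and the UBI device is now parented to its MTD device in sysfs. A usage sketch derived from the help text above, assuming a kernel with CONFIG_MTD_UBI_FASTMAP (and fm_autoconvert=Y for the enable case); with UBI built in, the parameter is prefixed as "ubi.mtd=" on the kernel command line, while a modular UBI takes the same strings as modprobe options:

        ubi.mtd=/dev/mtd1,0,0,5   - attach mtd1 as UBI 5 with fastmap disabled (the default)
        ubi.mtd=/dev/mtd2,0,0,6,1 - attach mtd2 as UBI 6 with fastmap enabled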

drivers/mtd/ubi/debug.c

Lines changed: 10 additions & 9 deletions
@@ -504,6 +504,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
 {
         unsigned long ubi_num = ubi->ubi_num;
         struct ubi_debug_info *d = &ubi->dbg;
+        umode_t mode = S_IRUSR | S_IWUSR;
         int n;
 
         if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -518,41 +519,41 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
 
         d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
 
-        d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+        d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
                                              (void *)ubi_num, &dfs_fops);
 
-        d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+        d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
                                             (void *)ubi_num, &dfs_fops);
 
-        d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+        d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
                                                  d->dfs_dir, (void *)ubi_num,
                                                  &dfs_fops);
 
-        d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+        d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
                                                  d->dfs_dir, (void *)ubi_num,
                                                  &dfs_fops);
 
         d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
-                                                      S_IWUSR, d->dfs_dir,
+                                                      mode, d->dfs_dir,
                                                       (void *)ubi_num,
                                                       &dfs_fops);
 
         d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
-                                                         S_IWUSR, d->dfs_dir,
+                                                         mode, d->dfs_dir,
                                                          (void *)ubi_num,
                                                          &dfs_fops);
 
         d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
-                                                       S_IWUSR, d->dfs_dir,
+                                                       mode, d->dfs_dir,
                                                        (void *)ubi_num,
                                                        &dfs_fops);
 
         d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
-                                              S_IWUSR, d->dfs_dir,
+                                              mode, d->dfs_dir,
                                               (void *)ubi_num, &dfs_fops);
 
         d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
-                                              S_IWUSR, d->dfs_dir,
+                                              mode, d->dfs_dir,
                                               (void *)ubi_num, &dfs_fops);
 
         debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,

drivers/mtd/ubi/eba.c

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ struct ubi_eba_table {
 };
 
 /**
- * next_sqnum - get next sequence number.
+ * ubi_next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
  * This function returns next sequence number to use, which is just the current
