
Commit f44c7db

Merge tag 'block-5.16-2021-11-13' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Set of fixes that should go into this merge window:

  - ioctl vs read data race fixes (Shin'ichiro)
  - blkcg use-after-free fix (Laibin)
  - Last piece of the puzzle for add_disk() error handling: enable __must_check (Luis)
  - Request allocation fixes (Ming)
  - Misc fixes (me)"

* tag 'block-5.16-2021-11-13' of git://git.kernel.dk/linux-block:
  blk-mq: fix filesystem I/O request allocation
  blkcg: Remove extra blkcg_bio_issue_init
  block: Hold invalidate_lock in BLKRESETZONE ioctl
  blk-mq: rename blk_attempt_bio_merge
  blk-mq: don't grab ->q_usage_counter in blk_mq_sched_bio_merge
  block: fix kerneldoc for disk_register_independent_access__ranges()
  block: add __must_check for *add_disk*() callers
  block: use enum type for blk_mq_alloc_data->rq_flags
  block: Hold invalidate_lock in BLKZEROOUT ioctl
  block: Hold invalidate_lock in BLKDISCARD ioctl
2 parents 2b7196a + b637108 commit f44c7db

9 files changed: 83 additions, 55 deletions

block/blk-core.c

Lines changed: 1 addition & 3 deletions
@@ -809,10 +809,8 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
         if (unlikely(!current->io_context))
                 create_task_io_context(current, GFP_ATOMIC, q->node);
 
-        if (blk_throtl_bio(bio)) {
-                blkcg_bio_issue_init(bio);
+        if (blk_throtl_bio(bio))
                 return false;
-        }
 
         blk_cgroup_bio_start(bio);
         blkcg_bio_issue_init(bio);

block/blk-ia-ranges.c

Lines changed: 2 additions & 2 deletions
@@ -104,8 +104,8 @@ static struct kobj_type blk_ia_ranges_ktype = {
 };
 
 /**
- * disk_register_ia_ranges - register with sysfs a set of independent
- *                           access ranges
+ * disk_register_independent_access_ranges - register with sysfs a set of
+ *                                           independent access ranges
  * @disk: Target disk
  * @new_iars: New set of independent access ranges
 *

block/blk-mq-sched.c

Lines changed: 0 additions & 4 deletions
@@ -370,9 +370,6 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
         bool ret = false;
         enum hctx_type type;
 
-        if (bio_queue_enter(bio))
-                return false;
-
         if (e && e->type->ops.bio_merge) {
                 ret = e->type->ops.bio_merge(q, bio, nr_segs);
                 goto out_put;
@@ -397,7 +394,6 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 
         spin_unlock(&ctx->lock);
 out_put:
-        blk_queue_exit(q);
         return ret;
 }
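The deleted bio_queue_enter()/blk_queue_exit() pair is redundant because the submission path that calls blk_mq_sched_bio_merge() already holds a queue usage reference, so the helper can rely on its caller. A minimal user-space sketch of that refactoring pattern, with made-up names (resource_get()/resource_put() stand in for entering and exiting the queue); this is an illustration, not kernel code:

/*
 * Hedged sketch, not kernel code: a helper that used to take and drop its
 * own reference now relies on the reference its caller already holds,
 * mirroring the removal of bio_queue_enter()/blk_queue_exit() above.
 */
#include <stdbool.h>
#include <stdio.h>

struct resource {
        int refs;
};

static bool resource_get(struct resource *r)
{
        if (r->refs < 0)                /* pretend the resource is going away */
                return false;
        r->refs++;
        return true;
}

static void resource_put(struct resource *r)
{
        r->refs--;
}

/* Caller must hold a reference on @r; no more get/put inside the helper. */
static bool try_merge(struct resource *r, int item)
{
        return r->refs > 0 && item % 2 == 0;    /* stand-in for merge logic */
}

int main(void)
{
        struct resource r = { .refs = 0 };

        if (!resource_get(&r))          /* one get/put pair, in the caller */
                return 1;
        printf("merged: %d\n", try_merge(&r, 4));
        resource_put(&r);
        return 0;
}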

block/blk-mq.c

Lines changed: 35 additions & 12 deletions
@@ -2495,8 +2495,9 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
         return BLK_MAX_REQUEST_COUNT;
 }
 
-static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
-                                  unsigned int nr_segs, bool *same_queue_rq)
+static bool blk_mq_attempt_bio_merge(struct request_queue *q,
+                                     struct bio *bio, unsigned int nr_segs,
+                                     bool *same_queue_rq)
 {
         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
                 if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
@@ -2520,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
         };
         struct request *rq;
 
-        if (unlikely(bio_queue_enter(bio)))
+        if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
                 return NULL;
-        if (unlikely(!submit_bio_checks(bio)))
-                goto put_exit;
-        if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
-                goto put_exit;
 
         rq_qos_throttle(q, bio);
 
@@ -2542,34 +2539,60 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
         rq_qos_cleanup(q, bio);
         if (bio->bi_opf & REQ_NOWAIT)
                 bio_wouldblock_error(bio);
-put_exit:
-        blk_queue_exit(q);
+
         return NULL;
 }
 
+static inline bool blk_mq_can_use_cached_rq(struct request *rq,
+                                            struct bio *bio)
+{
+        if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+                return false;
+
+        if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+                return false;
+
+        return true;
+}
+
 static inline struct request *blk_mq_get_request(struct request_queue *q,
                                                  struct blk_plug *plug,
                                                  struct bio *bio,
                                                  unsigned int nsegs,
                                                  bool *same_queue_rq)
 {
+        struct request *rq;
+        bool checked = false;
+
         if (plug) {
-                struct request *rq;
 
                 rq = rq_list_peek(&plug->cached_rq);
                 if (rq && rq->q == q) {
                         if (unlikely(!submit_bio_checks(bio)))
                                 return NULL;
-                        if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+                        if (blk_mq_attempt_bio_merge(q, bio, nsegs,
+                                                same_queue_rq))
                                 return NULL;
+                        checked = true;
+                        if (!blk_mq_can_use_cached_rq(rq, bio))
+                                goto fallback;
+                        rq->cmd_flags = bio->bi_opf;
                         plug->cached_rq = rq_list_next(rq);
                         INIT_LIST_HEAD(&rq->queuelist);
                         rq_qos_throttle(q, bio);
                         return rq;
                 }
         }
 
-        return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+fallback:
+        if (unlikely(bio_queue_enter(bio)))
+                return NULL;
+        if (!checked && !submit_bio_checks(bio))
+                return NULL;
+        rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+        if (!rq)
+                blk_queue_exit(q);
+        return rq;
 }
 
 /**
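The reworked blk_mq_get_request() tries the plug's cached request first and only enters the queue when it must allocate a new one; the checked flag keeps the bio checks from running twice after falling back. A stand-alone sketch of that shape, with hypothetical names and a plain malloc() in place of the real allocator:

/*
 * Hedged sketch of the "reuse a cached object if it is compatible,
 * otherwise fall back to a fresh allocation" pattern above. All names
 * are illustrative; the real code also enters/exits the request queue.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
        int type;
};

static bool checks_pass(int type)
{
        return type >= 0;               /* stand-in for submit_bio_checks() */
}

static struct req *alloc_new(int type)
{
        struct req *rq = malloc(sizeof(*rq));

        if (rq)
                rq->type = type;
        return rq;
}

static struct req *get_request(struct req **cached, int type)
{
        struct req *rq = *cached;
        bool checked = false;

        if (rq) {
                if (!checks_pass(type))
                        return NULL;
                checked = true;         /* don't re-run checks after fallback */
                if (rq->type != type)   /* like blk_mq_can_use_cached_rq() */
                        goto fallback;
                *cached = NULL;         /* consume the cached request */
                return rq;
        }

fallback:
        if (!checked && !checks_pass(type))
                return NULL;
        return alloc_new(type);
}

int main(void)
{
        struct req cached = { .type = 1 };
        struct req *slot = &cached;
        struct req *rq = get_request(&slot, 2); /* type mismatch: falls back */

        printf("%s\n", rq == &cached ? "reused cached" : "allocated new");
        if (rq != &cached)
                free(rq);
        return 0;
}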

block/blk-mq.h

Lines changed: 16 additions & 12 deletions
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
         return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
-/*
- * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
- * @flags: request command flags
- * @ctx: software queue cpu ctx
- */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-                                                     unsigned int flags,
-                                                     struct blk_mq_ctx *ctx)
+static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
 {
         enum hctx_type type = HCTX_TYPE_DEFAULT;
 
@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                 type = HCTX_TYPE_POLL;
         else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                 type = HCTX_TYPE_READ;
-
-        return ctx->hctxs[type];
+        return type;
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue cpu ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+                                                     unsigned int flags,
+                                                     struct blk_mq_ctx *ctx)
+{
+        return ctx->hctxs[blk_mq_get_hctx_type(flags)];
 }
 
 /*
@@ -149,7 +153,7 @@ struct blk_mq_alloc_data {
         blk_mq_req_flags_t flags;
         unsigned int shallow_depth;
         unsigned int cmd_flags;
-        unsigned int rq_flags;
+        req_flags_t rq_flags;
 
         /* allocate multiple requests/tags in one go */
         unsigned int nr_tags;
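Splitting the flags-to-type mapping into blk_mq_get_hctx_type() lets blk_mq_can_use_cached_rq() in blk-mq.c reuse it without needing a software queue context. A small user-space sketch of the same refactoring, with simplified flag values and structures that are purely illustrative:

/*
 * Hedged sketch of the refactoring above: the flag-to-type mapping is
 * pulled out of the lookup helper so other code (like the cached-request
 * compatibility check) can reuse it. Flag values and structures here are
 * illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

#define FLAG_POLLED 0x1
#define FLAG_READ   0x2

static enum hctx_type get_hctx_type(unsigned int flags)
{
        if (flags & FLAG_POLLED)
                return HCTX_TYPE_POLL;
        if (flags & FLAG_READ)
                return HCTX_TYPE_READ;
        return HCTX_TYPE_DEFAULT;
}

struct ctx {
        const char *hctxs[HCTX_MAX_TYPES];
};

/* The original helper now just indexes with the factored-out mapping. */
static const char *map_queue(struct ctx *ctx, unsigned int flags)
{
        return ctx->hctxs[get_hctx_type(flags)];
}

/* A reuse check can call the same mapping without needing a ctx. */
static bool can_reuse(enum hctx_type cached_type, unsigned int flags)
{
        return cached_type == get_hctx_type(flags);
}

int main(void)
{
        struct ctx c = { { "default-hw-queue", "read-hw-queue", "poll-hw-queue" } };

        printf("%s\n", map_queue(&c, FLAG_READ));             /* read-hw-queue */
        printf("%d\n", can_reuse(HCTX_TYPE_POLL, FLAG_READ)); /* 0: fall back */
        return 0;
}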

block/blk-zoned.c

Lines changed: 5 additions & 10 deletions
@@ -429,9 +429,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
                 op = REQ_OP_ZONE_RESET;
 
                 /* Invalidate the page cache, including dirty pages. */
+                filemap_invalidate_lock(bdev->bd_inode->i_mapping);
                 ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
                 if (ret)
-                        return ret;
+                        goto fail;
                 break;
         case BLKOPENZONE:
                 op = REQ_OP_ZONE_OPEN;
@@ -449,15 +450,9 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
         ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
                                GFP_KERNEL);
 
-        /*
-         * Invalidate the page cache again for zone reset: writes can only be
-         * direct for zoned devices so concurrent writes would not add any page
-         * to the page cache after/during reset. The page cache may be filled
-         * again due to concurrent reads though and dropping the pages for
-         * these is fine.
-         */
-        if (!ret && cmd == BLKRESETZONE)
-                ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+fail:
+        if (cmd == BLKRESETZONE)
+                filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
 
         return ret;
 }
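Since the invalidate lock is only taken for BLKRESETZONE, every exit after that point must release it conditionally, which is why the early return becomes goto fail. A compact user-space sketch of that error-handling shape, assuming a pthread mutex in place of filemap_invalidate_lock():

/*
 * Hedged sketch: a lock taken only for one sub-command must be released
 * on every exit path, so errors jump to a common label that unlocks
 * conditionally, mirroring the "goto fail" change above.
 */
#include <pthread.h>
#include <stdio.h>

enum cmd { CMD_RESET, CMD_OPEN };

static pthread_mutex_t invalidate_lock = PTHREAD_MUTEX_INITIALIZER;

static int truncate_range(void)  { return 0; }  /* stand-in helpers */
static int zone_mgmt(enum cmd c) { (void)c; return 0; }

static int zone_ioctl(enum cmd cmd)
{
        int ret = 0;

        if (cmd == CMD_RESET) {
                pthread_mutex_lock(&invalidate_lock);
                ret = truncate_range();
                if (ret)
                        goto fail;      /* must not return with the lock held */
        }

        ret = zone_mgmt(cmd);

fail:
        if (cmd == CMD_RESET)
                pthread_mutex_unlock(&invalidate_lock);
        return ret;
}

int main(void)
{
        printf("reset: %d, open: %d\n", zone_ioctl(CMD_RESET), zone_ioctl(CMD_OPEN));
        return 0;
}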

block/genhd.c

Lines changed: 3 additions & 3 deletions
@@ -394,8 +394,8 @@ static void disk_scan_partitions(struct gendisk *disk)
  * This function registers the partitioning information in @disk
  * with the kernel.
  */
-int device_add_disk(struct device *parent, struct gendisk *disk,
-                    const struct attribute_group **groups)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+                                 const struct attribute_group **groups)
 
 {
         struct device *ddev = disk_to_dev(disk);
@@ -544,7 +544,7 @@ int device_add_disk(struct device *parent, struct gendisk *disk,
 out_free_ext_minor:
         if (disk->major == BLOCK_EXT_MAJOR)
                 blk_free_ext_minor(disk->first_minor);
-        return WARN_ON_ONCE(ret); /* keep until all callers handle errors */
+        return ret;
 }
 EXPORT_SYMBOL(device_add_disk);
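With the WARN_ON_ONCE() crutch removed, a caller that ignores the return value is now caught at build time: the kernel's __must_check maps to the compiler's warn_unused_result attribute on gcc/clang. A minimal illustration outside the kernel (my_add_disk() is a made-up stand-in):

/*
 * Hedged sketch: the kernel's __must_check expands to
 * __attribute__((warn_unused_result)) on gcc/clang, so a caller that
 * drops the return value of an add_disk()-style function is warned
 * at compile time. my_add_disk() is a made-up stand-in.
 */
#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

static int __must_check my_add_disk(const char *name)
{
        if (!name)
                return -1;              /* registration can fail */
        printf("registered %s\n", name);
        return 0;
}

int main(void)
{
        /* my_add_disk("vda");     <- would trigger -Wunused-result */

        if (my_add_disk("vda") < 0) {   /* callers must handle the error */
                fprintf(stderr, "add_disk failed\n");
                return 1;
        }
        return 0;
}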

block/ioctl.c

Lines changed: 18 additions & 6 deletions
@@ -113,6 +113,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
         uint64_t range[2];
         uint64_t start, len;
         struct request_queue *q = bdev_get_queue(bdev);
+        struct inode *inode = bdev->bd_inode;
         int err;
 
         if (!(mode & FMODE_WRITE))
@@ -135,19 +136,25 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
         if (start + len > bdev_nr_bytes(bdev))
                 return -EINVAL;
 
+        filemap_invalidate_lock(inode->i_mapping);
         err = truncate_bdev_range(bdev, mode, start, start + len - 1);
         if (err)
-                return err;
+                goto fail;
 
-        return blkdev_issue_discard(bdev, start >> 9, len >> 9,
-                                    GFP_KERNEL, flags);
+        err = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+                                   GFP_KERNEL, flags);
+
+ fail:
+        filemap_invalidate_unlock(inode->i_mapping);
+        return err;
 }
 
 static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
                 unsigned long arg)
 {
         uint64_t range[2];
         uint64_t start, end, len;
+        struct inode *inode = bdev->bd_inode;
         int err;
 
         if (!(mode & FMODE_WRITE))
@@ -170,12 +177,17 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
                 return -EINVAL;
 
         /* Invalidate the page cache, including dirty pages */
+        filemap_invalidate_lock(inode->i_mapping);
         err = truncate_bdev_range(bdev, mode, start, end);
         if (err)
-                return err;
+                goto fail;
+
+        err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
+                                   BLKDEV_ZERO_NOUNMAP);
 
-        return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
-                        BLKDEV_ZERO_NOUNMAP);
+ fail:
+        filemap_invalidate_unlock(inode->i_mapping);
+        return err;
 }
 
 static int put_ushort(unsigned short __user *argp, unsigned short val)
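Both ioctl paths now hold the mapping's invalidate lock across the page-cache truncation and the discard/zeroout submission, closing the window in which a concurrent buffered read could repopulate the cache with stale data. A user-space sketch of the same idea, assuming a pthread rwlock in place of filemap_invalidate_lock() and a single cached value standing in for the page cache:

/*
 * Hedged sketch of the ioctl-vs-read race fix above: the "ioctl" side
 * holds the invalidate lock across both the cache drop and the device
 * update, so the "read" side can never cache stale data in the gap.
 * Everything here is illustrative user-space code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t invalidate_lock = PTHREAD_RWLOCK_INITIALIZER;
static int device_data = 42;            /* stand-in for on-disk contents */
static int cached = -1;                 /* stand-in page cache (-1 == empty) */

static void *read_path(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&invalidate_lock);
        if (cached == -1)
                cached = device_data;   /* populate the cache from the device */
        pthread_rwlock_unlock(&invalidate_lock);
        return NULL;
}

static int discard_ioctl(void)
{
        pthread_rwlock_wrlock(&invalidate_lock);
        cached = -1;                    /* truncate_bdev_range() equivalent */
        device_data = 0;                /* blkdev_issue_discard() equivalent */
        pthread_rwlock_unlock(&invalidate_lock);        /* unlock on all paths */
        return 0;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, read_path, NULL);
        discard_ioctl();
        pthread_join(t, NULL);
        printf("cached=%d device=%d\n", cached, device_data);
        return 0;
}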

include/linux/genhd.h

Lines changed: 3 additions & 3 deletions
@@ -205,9 +205,9 @@ static inline dev_t disk_devt(struct gendisk *disk)
 void disk_uevent(struct gendisk *disk, enum kobject_action action);
 
 /* block/genhd.c */
-int device_add_disk(struct device *parent, struct gendisk *disk,
-                    const struct attribute_group **groups);
-static inline int add_disk(struct gendisk *disk)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+                                 const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
 {
         return device_add_disk(NULL, disk, NULL);
 }
