Commit 3e28850

Merge tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Set of fixes for the batched tag allocation (Ming, me)

 - add_disk() error handling fix (Luis)

 - Nested queue quiesce fixes (Ming)

 - Shared tags init error handling fix (Ye)

 - Misc cleanups (Jean, Ming, me)

* tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block:
  nvme: wait until quiesce is done
  scsi: make sure that request queue queiesce and unquiesce balanced
  scsi: avoid to quiesce sdev->request_queue two times
  blk-mq: add one API for waiting until quiesce is done
  blk-mq: don't free tags if the tag_set is used by other device in queue initialztion
  block: fix device_add_disk() kobject_create_and_add() error handling
  block: ensure cached plug request matches the current queue
  block: move queue enter logic into blk_mq_submit_bio()
  block: make bio_queue_enter() fast-path available inline
  block: split request allocation components into helpers
  block: have plug stored requests hold references to the queue
  blk-mq: update hctx->nr_active in blk_mq_end_request_batch()
  blk-mq: add RQF_ELV debug entry
  blk-mq: only try to run plug merge if request has same queue with incoming bio
  block: move RQF_ELV setting into allocators
  dm: don't stop request queue after the dm device is suspended
  block: replace always false argument with 'false'
  block: assign correct tag before doing prefetch of request
  blk-mq: fix redundant check of !e expression
2 parents 1dc1f92 + 26af1cd
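For orientation on the quiesce-related entries above: the series separates marking a queue quiesced (blk_mq_quiesce_queue_nowait()) from waiting until in-flight dispatch has drained (the new blk_mq_wait_quiesce_done()), which is what lets nvme and scsi keep nested quiesce/unquiesce calls balanced. A minimal sketch of the intended driver-side usage; the my_ctrl/my_ns structures are hypothetical stand-ins for a real driver's queue list:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/list.h>

/* Hypothetical driver state; stands in for e.g. an NVMe ctrl's namespaces. */
struct my_ns {
        struct request_queue *queue;
        struct list_head node;
};

struct my_ctrl {
        struct list_head namespaces;    /* list of struct my_ns */
};

/*
 * Quiesce every queue owned by the controller: first mark them all
 * quiesced without blocking, then wait until dispatch has drained on
 * each. Splitting the two steps is what the new
 * blk_mq_wait_quiesce_done() API enables.
 */
static void my_ctrl_stop_queues(struct my_ctrl *ctrl)
{
        struct my_ns *ns;

        list_for_each_entry(ns, &ctrl->namespaces, node)
                blk_mq_quiesce_queue_nowait(ns->queue);

        list_for_each_entry(ns, &ctrl->namespaces, node)
                blk_mq_wait_quiesce_done(ns->queue);
}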

Showing 13 changed files with 263 additions and 140 deletions.

block/blk-core.c

Lines changed: 21 additions & 40 deletions
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-        rcu_read_lock();
-        if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-                goto fail;
-
-        /*
-         * The code that increments the pm_only counter must ensure that the
-         * counter is globally visible before the queue is unfrozen.
-         */
-        if (blk_queue_pm_only(q) &&
-            (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-                goto fail_put;
-
-        rcu_read_unlock();
-        return true;
-
-fail_put:
-        blk_queue_exit(q);
-fail:
-        rcu_read_unlock();
-        return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
         return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 {
-        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
         while (!blk_try_enter_queue(q, false)) {
                 struct gendisk *disk = bio->bi_bdev->bd_disk;
 
@@ -742,7 +716,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
         return BLK_STS_OK;
 }
 
-static noinline_for_stack bool submit_bio_checks(struct bio *bio)
+noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
         struct block_device *bdev = bio->bi_bdev;
         struct request_queue *q = bdev_get_queue(bdev);
@@ -860,22 +834,23 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
         return false;
 }
 
-static void __submit_bio(struct bio *bio)
+static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
 {
-        struct gendisk *disk = bio->bi_bdev->bd_disk;
-
         if (unlikely(bio_queue_enter(bio) != 0))
                 return;
+        if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
+                disk->fops->submit_bio(bio);
+        blk_queue_exit(disk->queue);
+}
 
-        if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
-                goto queue_exit;
-        if (!disk->fops->submit_bio) {
+static void __submit_bio(struct bio *bio)
+{
+        struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+        if (!disk->fops->submit_bio)
                 blk_mq_submit_bio(bio);
-                return;
-        }
-        disk->fops->submit_bio(bio);
-queue_exit:
-        blk_queue_exit(disk->queue);
+        else
+                __submit_bio_fops(disk, bio);
 }
 
 /*
@@ -1615,7 +1590,13 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
         flush_plug_callbacks(plug, from_schedule);
         if (!rq_list_empty(plug->mq_list))
                 blk_mq_flush_plug_list(plug, from_schedule);
-        if (unlikely(!from_schedule && plug->cached_rq))
+        /*
+         * Unconditionally flush out cached requests, even if the unplug
+         * event came from schedule. Since we know hold references to the
+         * queue for cached requests, we don't want a blocked task holding
+         * up a queue freeze/quiesce event.
+         */
+        if (unlikely(!rq_list_empty(plug->cached_rq)))
                 blk_mq_free_plug_rqs(plug);
 }
 
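Note on the first two hunks: blk_try_enter_queue() and the old inline bio_queue_enter() are deleted here, not dropped. Per the "block: make bio_queue_enter() fast-path available inline" commit in this series, they move into the private block/blk.h header so the uncontended path can be inlined into submitters, leaving only the sleeping slow path out of line as __bio_queue_enter(). A sketch of the resulting wrapper, assuming the helper names visible in this diff:

/* Sketch of the inline fast path (block/blk.h in this series). */
static inline int bio_queue_enter(struct bio *bio)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);

        /* Fast path: percpu ref taken, queue not frozen or pm_only. */
        if (blk_try_enter_queue(q, false))
                return 0;
        /* Slow path: may sleep waiting for a queue freeze to end. */
        return __bio_queue_enter(q, bio);
}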

block/blk-merge.c

Lines changed: 4 additions & 2 deletions
@@ -1101,9 +1101,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                  * the same queue, there should be only one such rq in a queue
                  */
                 *same_queue_rq = true;
+
+                if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+                                BIO_MERGE_OK)
+                        return true;
         }
-        if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == BIO_MERGE_OK)
-                return true;
         return false;
 }
 

block/blk-mq-debugfs.c

Lines changed: 1 addition & 0 deletions
@@ -308,6 +308,7 @@ static const char *const rqf_name[] = {
         RQF_NAME(SPECIAL_PAYLOAD),
         RQF_NAME(ZONE_WRITE_LOCKED),
         RQF_NAME(MQ_POLL_SLEPT),
+        RQF_NAME(ELV),
 };
 #undef RQF_NAME
 

block/blk-mq-sched.c

Lines changed: 11 additions & 4 deletions
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
         bool ret = false;
         enum hctx_type type;
 
-        if (e && e->type->ops.bio_merge)
-                return e->type->ops.bio_merge(q, bio, nr_segs);
+        if (bio_queue_enter(bio))
+                return false;
+
+        if (e && e->type->ops.bio_merge) {
+                ret = e->type->ops.bio_merge(q, bio, nr_segs);
+                goto out_put;
+        }
 
         ctx = blk_mq_get_ctx(q);
         hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
         type = hctx->type;
         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
             list_empty_careful(&ctx->rq_lists[type]))
-                return false;
+                goto out_put;
 
         /* default per sw-queue merge */
         spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                 ret = true;
 
         spin_unlock(&ctx->lock);
+out_put:
+        blk_queue_exit(q);
         return ret;
 }
 
@@ -497,7 +504,7 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
          * busy in case of 'none' scheduler, and this way may save
          * us one extra enqueue & dequeue to sw queue.
          */
-        if (!hctx->dispatch_busy && !e && !run_queue_async) {
+        if (!hctx->dispatch_busy && !run_queue_async) {
                 blk_mq_try_issue_list_directly(hctx, list);
                 if (list_empty(list))
                         goto out;
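The reference handling added to blk_mq_sched_bio_merge() above follows the standard kernel cleanup idiom: take the queue reference once up front, then route every exit through a single out_put label so blk_queue_exit() is never skipped. Reduced to its shape; every helper except bio_queue_enter()/blk_queue_exit() is hypothetical:

/* Shape of the enter/exit bracketing used above; helpers are hypothetical. */
static bool do_merge_like_op(struct request_queue *q, struct bio *bio)
{
        bool ret = false;

        if (bio_queue_enter(bio))       /* pin the queue or bail out */
                return false;

        if (elevator_path(q, bio)) {    /* hypothetical early-exit case */
                ret = true;
                goto out_put;
        }

        ret = sw_queue_path(q, bio);    /* hypothetical default case */
out_put:
        blk_queue_exit(q);              /* always drop the reference */
        return ret;
}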
