Skip to content

Commit 900e080

Browse files
committed
block: move queue enter logic into blk_mq_submit_bio()
Retain the old logic for the fops based submit, but for our internal blk_mq_submit_bio(), move the queue entering logic into the core function itself. We need to be a bit careful if going into the scheduler, as a scheduler or queue mappings can arbitrarily change before we have entered the queue. Have the bio scheduler mapping do that separately; it's a very cheap operation compared to actually doing the merging, locking, and lookups. Reviewed-by: Christoph Hellwig <[email protected]> [axboe: update to check merge post submit_bio_checks() doing remap...] Signed-off-by: Jens Axboe <[email protected]>
1 parent c98cb5b commit 900e080

File tree

4 files changed

+65
-34
lines changed

4 files changed

+65
-34
lines changed

block/blk-core.c

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -744,7 +744,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
744744
return BLK_STS_OK;
745745
}
746746

747-
static noinline_for_stack bool submit_bio_checks(struct bio *bio)
747+
noinline_for_stack bool submit_bio_checks(struct bio *bio)
748748
{
749749
struct block_device *bdev = bio->bi_bdev;
750750
struct request_queue *q = bdev_get_queue(bdev);
@@ -862,22 +862,23 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
862862
return false;
863863
}
864864

865-
static void __submit_bio(struct bio *bio)
865+
static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
866866
{
867-
struct gendisk *disk = bio->bi_bdev->bd_disk;
868-
869867
if (unlikely(bio_queue_enter(bio) != 0))
870868
return;
869+
if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
870+
disk->fops->submit_bio(bio);
871+
blk_queue_exit(disk->queue);
872+
}
871873

872-
if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
873-
goto queue_exit;
874-
if (!disk->fops->submit_bio) {
874+
static void __submit_bio(struct bio *bio)
875+
{
876+
struct gendisk *disk = bio->bi_bdev->bd_disk;
877+
878+
if (!disk->fops->submit_bio)
875879
blk_mq_submit_bio(bio);
876-
return;
877-
}
878-
disk->fops->submit_bio(bio);
879-
queue_exit:
880-
blk_queue_exit(disk->queue);
880+
else
881+
__submit_bio_fops(disk, bio);
881882
}
882883

883884
/*

block/blk-mq-sched.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
370370
bool ret = false;
371371
enum hctx_type type;
372372

373-
if (e && e->type->ops.bio_merge)
374-
return e->type->ops.bio_merge(q, bio, nr_segs);
373+
if (bio_queue_enter(bio))
374+
return false;
375+
376+
if (e && e->type->ops.bio_merge) {
377+
ret = e->type->ops.bio_merge(q, bio, nr_segs);
378+
goto out_put;
379+
}
375380

376381
ctx = blk_mq_get_ctx(q);
377382
hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
378383
type = hctx->type;
379384
if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
380385
list_empty_careful(&ctx->rq_lists[type]))
381-
return false;
386+
goto out_put;
382387

383388
/* default per sw-queue merge */
384389
spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
391396
ret = true;
392397

393398
spin_unlock(&ctx->lock);
399+
out_put:
400+
blk_queue_exit(q);
394401
return ret;
395402
}
396403

block/blk-mq.c

Lines changed: 41 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -2478,9 +2478,23 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
24782478
return BLK_MAX_REQUEST_COUNT;
24792479
}
24802480

2481+
static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
2482+
unsigned int nr_segs, bool *same_queue_rq)
2483+
{
2484+
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2485+
if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
2486+
return true;
2487+
if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2488+
return true;
2489+
}
2490+
return false;
2491+
}
2492+
24812493
static struct request *blk_mq_get_new_requests(struct request_queue *q,
24822494
struct blk_plug *plug,
2483-
struct bio *bio)
2495+
struct bio *bio,
2496+
unsigned int nsegs,
2497+
bool *same_queue_rq)
24842498
{
24852499
struct blk_mq_alloc_data data = {
24862500
.q = q,
@@ -2489,6 +2503,15 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
24892503
};
24902504
struct request *rq;
24912505

2506+
if (unlikely(bio_queue_enter(bio)))
2507+
return NULL;
2508+
if (unlikely(!submit_bio_checks(bio)))
2509+
goto put_exit;
2510+
if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
2511+
goto put_exit;
2512+
2513+
rq_qos_throttle(q, bio);
2514+
24922515
if (plug) {
24932516
data.nr_tags = plug->nr_ios;
24942517
plug->nr_ios = 1;
@@ -2502,25 +2525,34 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
25022525
rq_qos_cleanup(q, bio);
25032526
if (bio->bi_opf & REQ_NOWAIT)
25042527
bio_wouldblock_error(bio);
2528+
put_exit:
2529+
blk_queue_exit(q);
25052530
return NULL;
25062531
}
25072532

25082533
static inline struct request *blk_mq_get_request(struct request_queue *q,
25092534
struct blk_plug *plug,
2510-
struct bio *bio)
2535+
struct bio *bio,
2536+
unsigned int nsegs,
2537+
bool *same_queue_rq)
25112538
{
25122539
if (plug) {
25132540
struct request *rq;
25142541

25152542
rq = rq_list_peek(&plug->cached_rq);
25162543
if (rq) {
2544+
if (unlikely(!submit_bio_checks(bio)))
2545+
return NULL;
2546+
if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
2547+
return NULL;
25172548
plug->cached_rq = rq_list_next(rq);
25182549
INIT_LIST_HEAD(&rq->queuelist);
2550+
rq_qos_throttle(q, bio);
25192551
return rq;
25202552
}
25212553
}
25222554

2523-
return blk_mq_get_new_requests(q, plug, bio);
2555+
return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
25242556
}
25252557

25262558
/**
@@ -2546,26 +2578,20 @@ void blk_mq_submit_bio(struct bio *bio)
25462578
unsigned int nr_segs = 1;
25472579
blk_status_t ret;
25482580

2581+
if (unlikely(!blk_crypto_bio_prep(&bio)))
2582+
return;
2583+
25492584
blk_queue_bounce(q, &bio);
25502585
if (blk_may_split(q, bio))
25512586
__blk_queue_split(q, &bio, &nr_segs);
25522587

25532588
if (!bio_integrity_prep(bio))
2554-
goto queue_exit;
2555-
2556-
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2557-
if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
2558-
goto queue_exit;
2559-
if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2560-
goto queue_exit;
2561-
}
2562-
2563-
rq_qos_throttle(q, bio);
2589+
return;
25642590

25652591
plug = blk_mq_plug(q, bio);
2566-
rq = blk_mq_get_request(q, plug, bio);
2592+
rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
25672593
if (unlikely(!rq))
2568-
goto queue_exit;
2594+
return;
25692595

25702596
trace_block_getrq(bio);
25712597

@@ -2646,10 +2672,6 @@ void blk_mq_submit_bio(struct bio *bio)
26462672
/* Default case. */
26472673
blk_mq_sched_insert_request(rq, false, true, true);
26482674
}
2649-
2650-
return;
2651-
queue_exit:
2652-
blk_queue_exit(q);
26532675
}
26542676

26552677
static size_t order_to_size(unsigned int order)

block/blk.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ void blk_freeze_queue(struct request_queue *q);
5656
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
5757
void blk_queue_start_drain(struct request_queue *q);
5858
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
59+
bool submit_bio_checks(struct bio *bio);
5960

6061
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
6162
{

0 commit comments

Comments (0)