Skip to content

Commit 7153971

Browse files
committed
block: split request allocation components into helpers
This is in preparation for a fix, but serves as a cleanup as well, moving the cached vs. regular allocation logic out of blk_mq_submit_bio().

Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent c5fc7b9 commit 7153971

File tree

1 file changed

+48
-23
lines changed

1 file changed

+48
-23
lines changed

block/blk-mq.c

Lines changed: 48 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -2478,6 +2478,51 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
24782478
return BLK_MAX_REQUEST_COUNT;
24792479
}
24802480

2481+
static struct request *blk_mq_get_new_requests(struct request_queue *q,
2482+
struct blk_plug *plug,
2483+
struct bio *bio)
2484+
{
2485+
struct blk_mq_alloc_data data = {
2486+
.q = q,
2487+
.nr_tags = 1,
2488+
.cmd_flags = bio->bi_opf,
2489+
};
2490+
struct request *rq;
2491+
2492+
if (plug) {
2493+
data.nr_tags = plug->nr_ios;
2494+
plug->nr_ios = 1;
2495+
data.cached_rq = &plug->cached_rq;
2496+
}
2497+
2498+
rq = __blk_mq_alloc_requests(&data);
2499+
if (rq)
2500+
return rq;
2501+
2502+
rq_qos_cleanup(q, bio);
2503+
if (bio->bi_opf & REQ_NOWAIT)
2504+
bio_wouldblock_error(bio);
2505+
return NULL;
2506+
}
2507+
2508+
static inline struct request *blk_mq_get_request(struct request_queue *q,
2509+
struct blk_plug *plug,
2510+
struct bio *bio)
2511+
{
2512+
if (plug) {
2513+
struct request *rq;
2514+
2515+
rq = rq_list_peek(&plug->cached_rq);
2516+
if (rq) {
2517+
plug->cached_rq = rq_list_next(rq);
2518+
INIT_LIST_HEAD(&rq->queuelist);
2519+
return rq;
2520+
}
2521+
}
2522+
2523+
return blk_mq_get_new_requests(q, plug, bio);
2524+
}
2525+
24812526
/**
24822527
* blk_mq_submit_bio - Create and send a request to block device.
24832528
* @bio: Bio pointer.
@@ -2518,29 +2563,9 @@ void blk_mq_submit_bio(struct bio *bio)
25182563
rq_qos_throttle(q, bio);
25192564

25202565
plug = blk_mq_plug(q, bio);
2521-
if (plug && plug->cached_rq) {
2522-
rq = rq_list_pop(&plug->cached_rq);
2523-
INIT_LIST_HEAD(&rq->queuelist);
2524-
} else {
2525-
struct blk_mq_alloc_data data = {
2526-
.q = q,
2527-
.nr_tags = 1,
2528-
.cmd_flags = bio->bi_opf,
2529-
};
2530-
2531-
if (plug) {
2532-
data.nr_tags = plug->nr_ios;
2533-
plug->nr_ios = 1;
2534-
data.cached_rq = &plug->cached_rq;
2535-
}
2536-
rq = __blk_mq_alloc_requests(&data);
2537-
if (unlikely(!rq)) {
2538-
rq_qos_cleanup(q, bio);
2539-
if (bio->bi_opf & REQ_NOWAIT)
2540-
bio_wouldblock_error(bio);
2541-
goto queue_exit;
2542-
}
2543-
}
2566+
rq = blk_mq_get_request(q, plug, bio);
2567+
if (unlikely(!rq))
2568+
goto queue_exit;
25442569

25452570
trace_block_getrq(bio);
25462571

0 commit comments

Comments
 (0)