
Commit b0077e2

Christoph Hellwig authored and axboe committed
blk-mq: make sure active queue usage is held for bio_integrity_prep()
blk_integrity_unregister() can come in while a bio with integrity data prepared is in flight if the queue usage counter is not held, so the request may be completed by calling profile->complete_fn after the integrity profile has been unregistered, leading to a kernel panic.

Another constraint is that bio_integrity_prep() needs to be called before the bio is merged.

Fix the issue by:

- calling bio_integrity_prep() with a queue usage counter reliably grabbed

- calling bio_integrity_prep() before bio merge

Fixes: 900e080 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Yi Zhang <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Tested-by: Yi Zhang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent b85ea95 commit b0077e2
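
The ordering the fix enforces in blk_mq_submit_bio() can be sketched as follows. This is a simplified reading of the new flow taken from the diff below, not the verbatim kernel source; the surrounding declarations and the targets of the done/fail labels are omitted:

	if (rq) {
		/*
		 * Reusing a cached request from the plug: the cached request
		 * already pins the queue, so a usage reference is held here.
		 * Prepare integrity data first, then try to merge, then check
		 * whether the cached request actually fits this bio.
		 */
		if (!bio_integrity_prep(bio))
			return;
		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
			return;
		if (blk_mq_can_use_cached_rq(rq, plug, bio))
			goto done;
		percpu_ref_get(&q->q_usage_counter);
	} else {
		/*
		 * No cached request: grab the queue usage counter before
		 * bio_integrity_prep(), and prepare integrity before any
		 * merge is attempted inside blk_mq_get_new_requests().
		 */
		if (unlikely(bio_queue_enter(bio)))
			return;
		if (!bio_integrity_prep(bio))
			goto fail;
	}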

File tree: 1 file changed, +38 −37 lines


block/blk-mq.c

Lines changed: 38 additions & 37 deletions
@@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-
 	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
+		return NULL;
 
 	rq_qos_throttle(q, bio);
 
@@ -2878,48 +2875,36 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-queue_exit:
-	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+/* return true if this @rq can be used for @bio */
+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+		struct bio *bio)
 {
-	struct request *rq;
-	enum hctx_type type, hctx_type;
+	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
+	enum hctx_type hctx_type = rq->mq_hctx->type;
 
-	if (!plug)
-		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
-	if (!rq || rq->q != q)
-		return NULL;
+	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
-		return NULL;
-	}
-
-	type = blk_mq_get_hctx_type((*bio)->bi_opf);
-	hctx_type = rq->mq_hctx->type;
 	if (type != hctx_type &&
 	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-		return NULL;
+		return false;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return false;
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
 	 * plug and hence killed the cached_rq list as well. Pop this entry
 	 * before we throttle.
 	 */
 	plug->cached_rq = rq_list_next(rq);
-	rq_qos_throttle(q, *bio);
+	rq_qos_throttle(rq->q, bio);
 
 	blk_mq_rq_time_init(rq, 0);
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
-	return rq;
+	return true;
 }
 
 static void bio_set_ioprio(struct bio *bio)
@@ -2949,7 +2934,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
+	struct request *rq = NULL;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2960,20 +2945,36 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (!bio_integrity_prep(bio))
-		return;
-
 	bio_set_ioprio(bio);
 
-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-	if (!rq) {
-		if (!bio)
+	if (plug) {
+		rq = rq_list_peek(&plug->cached_rq);
+		if (rq && rq->q != q)
+			rq = NULL;
+	}
+	if (rq) {
+		if (!bio_integrity_prep(bio))
 			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-		if (unlikely(!rq))
+		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 			return;
+		if (blk_mq_can_use_cached_rq(rq, plug, bio))
+			goto done;
+		percpu_ref_get(&q->q_usage_counter);
+	} else {
+		if (unlikely(bio_queue_enter(bio)))
+			return;
+		if (!bio_integrity_prep(bio))
+			goto fail;
+	}
+
+	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+	if (unlikely(!rq)) {
+fail:
+		blk_queue_exit(q);
+		return;
 	}
 
+done:
 	trace_block_getrq(bio);
 
 	rq_qos_track(q, rq, bio);
