
Commit d94ecfc

Christoph Hellwig authored and axboe committed
blk-mq: split out a __blk_mq_get_driver_tag helper
Allocation of the driver tag when a scheduler is in use shares very little code with the "normal" tag allocation. Split out a new helper to streamline this path, and untangle it from the complex normal tag allocation.

This also avoids failing driver tag allocation because of an inactive hctx during CPU hotplug, and fixes a potential hang risk.

Fixes: bf0beec ("blk-mq: drain I/O when all CPUs in a hctx are offline")
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: John Garry <[email protected]>
Cc: Dongli Zhang <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Daniel Wagner <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 5aec598 commit d94ecfc
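A minimal userspace sketch of the fast-path/slow-path split described above (every name here is a hypothetical stand-in, not the kernel API): the inline wrapper returns immediately when a driver tag is already assigned, and only the out-of-line helper touches the allocator.

#include <stdbool.h>
#include <stdio.h>

#define NO_TAG (-1)			/* stand-in for BLK_MQ_NO_TAG */

struct fake_rq {
	int tag;			/* driver tag, NO_TAG until assigned */
};

/* Out-of-line slow path: the only place that touches the "allocator". */
static bool get_driver_tag_slow(struct fake_rq *rq)
{
	static int next_tag;		/* trivial allocator for the sketch */

	rq->tag = next_tag++;
	return true;
}

/* Inline fast path: no allocator work when a tag is already held. */
static inline bool get_driver_tag(struct fake_rq *rq)
{
	if (rq->tag != NO_TAG)
		return true;
	return get_driver_tag_slow(rq);
}

int main(void)
{
	struct fake_rq rq = { .tag = NO_TAG };

	printf("%d tag=%d\n", get_driver_tag(&rq), rq.tag);	/* allocates */
	printf("%d tag=%d\n", get_driver_tag(&rq), rq.tag);	/* fast path */
	return 0;
}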

File tree

4 files changed: +35 -30 lines changed


block/blk-mq-tag.c

Lines changed: 27 additions & 0 deletions
@@ -191,6 +191,33 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	return tag + tag_offset;
 }
 
+bool __blk_mq_get_driver_tag(struct request *rq)
+{
+	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
+	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
+	bool shared = blk_mq_tag_busy(rq->mq_hctx);
+	int tag;
+
+	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
+		bt = &rq->mq_hctx->tags->breserved_tags;
+		tag_offset = 0;
+	}
+
+	if (!hctx_may_queue(rq->mq_hctx, bt))
+		return false;
+	tag = __sbitmap_queue_get(bt);
+	if (tag == BLK_MQ_NO_TAG)
+		return false;
+
+	rq->tag = tag + tag_offset;
+	if (shared) {
+		rq->rq_flags |= RQF_MQ_INFLIGHT;
+		atomic_inc(&rq->mq_hctx->nr_active);
+	}
+	rq->mq_hctx->tags->rqs[rq->tag] = rq;
+	return true;
+}
+
 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 		    unsigned int tag)
 {
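To see why the reserved case above resets tag_offset to 0, here is a rough self-contained model (not the kernel API; the plain bitmap is a stand-in for sbitmap_queue): reserved tags come from their own small pool and keep the range [0, nr_reserved), while normal tags come from the main pool and are shifted up by nr_reserved, so the two ranges never overlap.

#include <stdbool.h>
#include <stdio.h>

#define NR_RESERVED	2
#define NR_NORMAL	6
#define NO_TAG		(-1)

/* Stand-in for __sbitmap_queue_get(): first free bit, or NO_TAG. */
static int bitmap_get(unsigned int *map, int depth)
{
	for (int i = 0; i < depth; i++) {
		if (!(*map & (1u << i))) {
			*map |= 1u << i;
			return i;
		}
	}
	return NO_TAG;
}

static unsigned int reserved_map, normal_map;

/* Reserved tags land in [0, NR_RESERVED); normal tags in
 * [NR_RESERVED, NR_RESERVED + NR_NORMAL). */
static int get_tag(bool reserved)
{
	unsigned int *map = reserved ? &reserved_map : &normal_map;
	int depth = reserved ? NR_RESERVED : NR_NORMAL;
	int tag_offset = reserved ? 0 : NR_RESERVED;
	int tag = bitmap_get(map, depth);

	return tag == NO_TAG ? NO_TAG : tag + tag_offset;
}

int main(void)
{
	int r1 = get_tag(true);
	int r2 = get_tag(true);
	int r3 = get_tag(true);
	int n1 = get_tag(false);
	int n2 = get_tag(false);

	printf("reserved: %d %d %d\n", r1, r2, r3);	/* 0 1 -1 */
	printf("normal:   %d %d\n", n1, n2);		/* 2 3 */
	return 0;
}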

block/blk-mq-tag.h

Lines changed: 8 additions & 0 deletions
@@ -51,6 +51,14 @@ enum {
 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
 };
 
+bool __blk_mq_get_driver_tag(struct request *rq);
+static inline bool blk_mq_get_driver_tag(struct request *rq)
+{
+	if (rq->tag != BLK_MQ_NO_TAG)
+		return true;
+	return __blk_mq_get_driver_tag(rq);
+}
+
 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

block/blk-mq.c

Lines changed: 0 additions & 29 deletions
@@ -1052,35 +1052,6 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-bool blk_mq_get_driver_tag(struct request *rq)
-{
-	struct blk_mq_alloc_data data = {
-		.q = rq->q,
-		.hctx = rq->mq_hctx,
-		.flags = BLK_MQ_REQ_NOWAIT,
-		.cmd_flags = rq->cmd_flags,
-	};
-	bool shared;
-
-	if (rq->tag != BLK_MQ_NO_TAG)
-		return true;
-
-	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
-		data.flags |= BLK_MQ_REQ_RESERVED;
-
-	shared = blk_mq_tag_busy(data.hctx);
-	rq->tag = blk_mq_get_tag(&data);
-	if (rq->tag >= 0) {
-		if (shared) {
-			rq->rq_flags |= RQF_MQ_INFLIGHT;
-			atomic_inc(&data.hctx->nr_active);
-		}
-		data.hctx->tags->rqs[rq->tag] = rq;
-	}
-
-	return rq->tag != BLK_MQ_NO_TAG;
-}
-
 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 				int flags, void *key)
 {

block/blk-mq.h

Lines changed: 0 additions & 1 deletion
@@ -44,7 +44,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
4444
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
4545
bool kick_requeue_list);
4646
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
47-
bool blk_mq_get_driver_tag(struct request *rq);
4847
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
4948
struct blk_mq_ctx *start);
5049
