Skip to content

Commit b637108

Browse files
Ming Lei authored and axboe committed
blk-mq: fix filesystem I/O request allocation
submit_bio_checks() may update bio->bi_opf, so we have to initialize blk_mq_alloc_data.cmd_flags with bio->bi_opf after submit_bio_checks() returns when allocating a new request. In the case of using a cached request, fall back to allocating a new request if the cached rq isn't compatible with the incoming bio; otherwise change rq->cmd_flags to the incoming bio->bi_opf.

Fixes: 900e080 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Geert Uytterhoeven <[email protected]>
Tested-by: Geert Uytterhoeven <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent b781d8d commit b637108

File tree

2 files changed

+45
-20
lines changed

2 files changed

+45
-20
lines changed

block/blk-mq.c

Lines changed: 30 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2521,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
25212521
};
25222522
struct request *rq;
25232523

2524-
if (unlikely(bio_queue_enter(bio)))
2525-
return NULL;
2526-
if (unlikely(!submit_bio_checks(bio)))
2527-
goto put_exit;
25282524
if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
2529-
goto put_exit;
2525+
return NULL;
25302526

25312527
rq_qos_throttle(q, bio);
25322528

@@ -2543,19 +2539,32 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
25432539
rq_qos_cleanup(q, bio);
25442540
if (bio->bi_opf & REQ_NOWAIT)
25452541
bio_wouldblock_error(bio);
2546-
put_exit:
2547-
blk_queue_exit(q);
2542+
25482543
return NULL;
25492544
}
25502545

2546+
static inline bool blk_mq_can_use_cached_rq(struct request *rq,
2547+
struct bio *bio)
2548+
{
2549+
if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
2550+
return false;
2551+
2552+
if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2553+
return false;
2554+
2555+
return true;
2556+
}
2557+
25512558
static inline struct request *blk_mq_get_request(struct request_queue *q,
25522559
struct blk_plug *plug,
25532560
struct bio *bio,
25542561
unsigned int nsegs,
25552562
bool *same_queue_rq)
25562563
{
2564+
struct request *rq;
2565+
bool checked = false;
2566+
25572567
if (plug) {
2558-
struct request *rq;
25592568

25602569
rq = rq_list_peek(&plug->cached_rq);
25612570
if (rq && rq->q == q) {
@@ -2564,14 +2573,26 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
25642573
if (blk_mq_attempt_bio_merge(q, bio, nsegs,
25652574
same_queue_rq))
25662575
return NULL;
2576+
checked = true;
2577+
if (!blk_mq_can_use_cached_rq(rq, bio))
2578+
goto fallback;
2579+
rq->cmd_flags = bio->bi_opf;
25672580
plug->cached_rq = rq_list_next(rq);
25682581
INIT_LIST_HEAD(&rq->queuelist);
25692582
rq_qos_throttle(q, bio);
25702583
return rq;
25712584
}
25722585
}
25732586

2574-
return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
2587+
fallback:
2588+
if (unlikely(bio_queue_enter(bio)))
2589+
return NULL;
2590+
if (!checked && !submit_bio_checks(bio))
2591+
return NULL;
2592+
rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
2593+
if (!rq)
2594+
blk_queue_exit(q);
2595+
return rq;
25752596
}
25762597

25772598
/**

block/blk-mq.h

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
8989
return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
9090
}
9191

92-
/*
93-
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
94-
* @q: request queue
95-
* @flags: request command flags
96-
* @ctx: software queue cpu ctx
97-
*/
98-
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
99-
unsigned int flags,
100-
struct blk_mq_ctx *ctx)
92+
static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
10193
{
10294
enum hctx_type type = HCTX_TYPE_DEFAULT;
10395

@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
108100
type = HCTX_TYPE_POLL;
109101
else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
110102
type = HCTX_TYPE_READ;
111-
112-
return ctx->hctxs[type];
103+
return type;
104+
}
105+
106+
/*
107+
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
108+
* @q: request queue
109+
* @flags: request command flags
110+
* @ctx: software queue cpu ctx
111+
*/
112+
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
113+
unsigned int flags,
114+
struct blk_mq_ctx *ctx)
115+
{
116+
return ctx->hctxs[blk_mq_get_hctx_type(flags)];
113117
}
114118

115119
/*

0 commit comments

Comments (0)