Skip to content

Commit c5fc7b9

Browse files
committed
block: have plug stored requests hold references to the queue
Requests that were stored in the cache deliberately didn't hold an enter reference to the queue, instead we grabbed one every time we pulled a request out of there. That made for awkward logic on freeing the remainder of the cached list, if needed, where we had to artificially raise the queue usage count before each free. Grab references up front for cached plug requests. That's safer, and also more efficient. Fixes: 47c122e ("block: pre-allocate requests if plug is started and is a batch") Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 3b87c6e commit c5fc7b9

File tree

2 files changed

+11
-4
lines changed

2 files changed

+11
-4
lines changed

block/blk-core.c

Lines changed: 7 additions & 1 deletion

@@ -1643,7 +1643,13 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 		flush_plug_callbacks(plug, from_schedule);
 	if (!rq_list_empty(plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
-	if (unlikely(!from_schedule && plug->cached_rq))
+	/*
+	 * Unconditionally flush out cached requests, even if the unplug
+	 * event came from schedule. Since we know we hold references to the
+	 * queue for cached requests, we don't want a blocked task holding
+	 * up a queue freeze/quiesce event.
+	 */
+	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 }

block/blk-mq.c

Lines changed: 4 additions & 3 deletions

@@ -410,7 +410,10 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 		tag_mask &= ~(1UL << i);
 		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
 		rq_list_add(data->cached_rq, rq);
+		nr++;
 	}
+	/* caller already holds a reference, add for remainder */
+	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
 	data->nr_tags -= nr;

 	return rq_list_pop(data->cached_rq);
@@ -630,10 +633,8 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
 	struct request *rq;

-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
-		percpu_ref_get(&rq->q->q_usage_counter);
+	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
 		blk_mq_free_request(rq);
-	}
 }

 static void req_bio_endio(struct request *rq, struct bio *bio,

0 commit comments

Comments
 (0)