Skip to content

Commit a3396b9

Browse files
Christoph Hellwig authored and axboe committed
block: add a rq_list type
Replace the semi-open coded request list helpers with a proper rq_list
type that mirrors the bio_list and has head and tail pointers.  Besides
better type safety this actually allows to insert at the tail of the
list, which will be useful soon.

Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent e8225ab commit a3396b9

File tree

11 files changed

+104
-88
lines changed

11 files changed

+104
-88
lines changed

block/blk-core.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1120,8 +1120,8 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
11201120
return;
11211121

11221122
plug->cur_ktime = 0;
1123-
plug->mq_list = NULL;
1124-
plug->cached_rq = NULL;
1123+
rq_list_init(&plug->mq_list);
1124+
rq_list_init(&plug->cached_rqs);
11251125
plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
11261126
plug->rq_count = 0;
11271127
plug->multiple_queues = false;
@@ -1217,7 +1217,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
12171217
* queue for cached requests, we don't want a blocked task holding
12181218
* up a queue freeze/quiesce event.
12191219
*/
1220-
if (unlikely(!rq_list_empty(plug->cached_rq)))
1220+
if (unlikely(!rq_list_empty(&plug->cached_rqs)))
12211221
blk_mq_free_plug_rqs(plug);
12221222

12231223
plug->cur_ktime = 0;

block/blk-merge.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1179,7 +1179,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
11791179
struct blk_plug *plug = current->plug;
11801180
struct request *rq;
11811181

1182-
if (!plug || rq_list_empty(plug->mq_list))
1182+
if (!plug || rq_list_empty(&plug->mq_list))
11831183
return false;
11841184

11851185
rq_list_for_each(&plug->mq_list, rq) {

block/blk-mq.c

Lines changed: 19 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -478,7 +478,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
478478
prefetch(tags->static_rqs[tag]);
479479
tag_mask &= ~(1UL << i);
480480
rq = blk_mq_rq_ctx_init(data, tags, tag);
481-
rq_list_add(data->cached_rq, rq);
481+
rq_list_add_head(data->cached_rqs, rq);
482482
nr++;
483483
}
484484
if (!(data->rq_flags & RQF_SCHED_TAGS))
@@ -487,7 +487,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
487487
percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
488488
data->nr_tags -= nr;
489489

490-
return rq_list_pop(data->cached_rq);
490+
return rq_list_pop(data->cached_rqs);
491491
}
492492

493493
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
@@ -584,7 +584,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
584584
.flags = flags,
585585
.cmd_flags = opf,
586586
.nr_tags = plug->nr_ios,
587-
.cached_rq = &plug->cached_rq,
587+
.cached_rqs = &plug->cached_rqs,
588588
};
589589
struct request *rq;
590590

@@ -609,14 +609,14 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
609609
if (!plug)
610610
return NULL;
611611

612-
if (rq_list_empty(plug->cached_rq)) {
612+
if (rq_list_empty(&plug->cached_rqs)) {
613613
if (plug->nr_ios == 1)
614614
return NULL;
615615
rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
616616
if (!rq)
617617
return NULL;
618618
} else {
619-
rq = rq_list_peek(&plug->cached_rq);
619+
rq = rq_list_peek(&plug->cached_rqs);
620620
if (!rq || rq->q != q)
621621
return NULL;
622622

@@ -625,7 +625,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
625625
if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
626626
return NULL;
627627

628-
plug->cached_rq = rq_list_next(rq);
628+
rq_list_pop(&plug->cached_rqs);
629629
blk_mq_rq_time_init(rq, blk_time_get_ns());
630630
}
631631

@@ -802,7 +802,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
802802
{
803803
struct request *rq;
804804

805-
while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
805+
while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
806806
blk_mq_free_request(rq);
807807
}
808808

@@ -1392,8 +1392,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
13921392
*/
13931393
if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
13941394
plug->has_elevator = true;
1395-
rq->rq_next = NULL;
1396-
rq_list_add(&plug->mq_list, rq);
1395+
rq_list_add_head(&plug->mq_list, rq);
13971396
plug->rq_count++;
13981397
}
13991398

@@ -2785,7 +2784,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
27852784
blk_status_t ret = BLK_STS_OK;
27862785

27872786
while ((rq = rq_list_pop(&plug->mq_list))) {
2788-
bool last = rq_list_empty(plug->mq_list);
2787+
bool last = rq_list_empty(&plug->mq_list);
27892788

27902789
if (hctx != rq->mq_hctx) {
27912790
if (hctx) {
@@ -2828,8 +2827,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
28282827
{
28292828
struct blk_mq_hw_ctx *this_hctx = NULL;
28302829
struct blk_mq_ctx *this_ctx = NULL;
2831-
struct request *requeue_list = NULL;
2832-
struct request **requeue_lastp = &requeue_list;
2830+
struct rq_list requeue_list = {};
28332831
unsigned int depth = 0;
28342832
bool is_passthrough = false;
28352833
LIST_HEAD(list);
@@ -2843,12 +2841,12 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
28432841
is_passthrough = blk_rq_is_passthrough(rq);
28442842
} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
28452843
is_passthrough != blk_rq_is_passthrough(rq)) {
2846-
rq_list_add_tail(&requeue_lastp, rq);
2844+
rq_list_add_tail(&requeue_list, rq);
28472845
continue;
28482846
}
28492847
list_add(&rq->queuelist, &list);
28502848
depth++;
2851-
} while (!rq_list_empty(plug->mq_list));
2849+
} while (!rq_list_empty(&plug->mq_list));
28522850

28532851
plug->mq_list = requeue_list;
28542852
trace_block_unplug(this_hctx->queue, depth, !from_sched);
@@ -2903,19 +2901,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
29032901
if (q->mq_ops->queue_rqs) {
29042902
blk_mq_run_dispatch_ops(q,
29052903
__blk_mq_flush_plug_list(q, plug));
2906-
if (rq_list_empty(plug->mq_list))
2904+
if (rq_list_empty(&plug->mq_list))
29072905
return;
29082906
}
29092907

29102908
blk_mq_run_dispatch_ops(q,
29112909
blk_mq_plug_issue_direct(plug));
2912-
if (rq_list_empty(plug->mq_list))
2910+
if (rq_list_empty(&plug->mq_list))
29132911
return;
29142912
}
29152913

29162914
do {
29172915
blk_mq_dispatch_plug_list(plug, from_schedule);
2918-
} while (!rq_list_empty(plug->mq_list));
2916+
} while (!rq_list_empty(&plug->mq_list));
29192917
}
29202918

29212919
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2980,7 +2978,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
29802978
if (plug) {
29812979
data.nr_tags = plug->nr_ios;
29822980
plug->nr_ios = 1;
2983-
data.cached_rq = &plug->cached_rq;
2981+
data.cached_rqs = &plug->cached_rqs;
29842982
}
29852983

29862984
rq = __blk_mq_alloc_requests(&data);
@@ -3003,7 +3001,7 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
30033001

30043002
if (!plug)
30053003
return NULL;
3006-
rq = rq_list_peek(&plug->cached_rq);
3004+
rq = rq_list_peek(&plug->cached_rqs);
30073005
if (!rq || rq->q != q)
30083006
return NULL;
30093007
if (type != rq->mq_hctx->type &&
@@ -3017,14 +3015,14 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
30173015
static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
30183016
struct bio *bio)
30193017
{
3020-
WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
3018+
if (rq_list_pop(&plug->cached_rqs) != rq)
3019+
WARN_ON_ONCE(1);
30213020

30223021
/*
30233022
* If any qos ->throttle() end up blocking, we will have flushed the
30243023
* plug and hence killed the cached_rq list as well. Pop this entry
30253024
* before we throttle.
30263025
*/
3027-
plug->cached_rq = rq_list_next(rq);
30283026
rq_qos_throttle(rq->q, bio);
30293027

30303028
blk_mq_rq_time_init(rq, blk_time_get_ns());

block/blk-mq.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ struct blk_mq_alloc_data {
155155

156156
/* allocate multiple requests/tags in one go */
157157
unsigned int nr_tags;
158-
struct request **cached_rq;
158+
struct rq_list *cached_rqs;
159159

160160
/* input & output parameter */
161161
struct blk_mq_ctx *ctx;

drivers/block/null_blk/main.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
16381638
return BLK_STS_OK;
16391639
}
16401640

1641-
static void null_queue_rqs(struct request **rqlist)
1641+
static void null_queue_rqs(struct rq_list *rqlist)
16421642
{
1643-
struct request *requeue_list = NULL;
1644-
struct request **requeue_lastp = &requeue_list;
1643+
struct rq_list requeue_list = {};
16451644
struct blk_mq_queue_data bd = { };
16461645
blk_status_t ret;
16471646

@@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct request **rqlist)
16511650
bd.rq = rq;
16521651
ret = null_queue_rq(rq->mq_hctx, &bd);
16531652
if (ret != BLK_STS_OK)
1654-
rq_list_add_tail(&requeue_lastp, rq);
1655-
} while (!rq_list_empty(*rqlist));
1653+
rq_list_add_tail(&requeue_list, rq);
1654+
} while (!rq_list_empty(rqlist));
16561655

16571656
*rqlist = requeue_list;
16581657
}

drivers/block/virtio_blk.c

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -472,7 +472,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
472472
}
473473

474474
static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
475-
struct request **rqlist)
475+
struct rq_list *rqlist)
476476
{
477477
struct request *req;
478478
unsigned long flags;
@@ -499,11 +499,10 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
499499
virtqueue_notify(vq->vq);
500500
}
501501

502-
static void virtio_queue_rqs(struct request **rqlist)
502+
static void virtio_queue_rqs(struct rq_list *rqlist)
503503
{
504-
struct request *submit_list = NULL;
505-
struct request *requeue_list = NULL;
506-
struct request **requeue_lastp = &requeue_list;
504+
struct rq_list submit_list = { };
505+
struct rq_list requeue_list = { };
507506
struct virtio_blk_vq *vq = NULL;
508507
struct request *req;
509508

@@ -515,9 +514,9 @@ static void virtio_queue_rqs(struct request **rqlist)
515514
vq = this_vq;
516515

517516
if (virtblk_prep_rq_batch(req))
518-
rq_list_add(&submit_list, req); /* reverse order */
517+
rq_list_add_head(&submit_list, req); /* reverse order */
519518
else
520-
rq_list_add_tail(&requeue_lastp, req);
519+
rq_list_add_tail(&requeue_list, req);
521520
}
522521

523522
if (vq)

drivers/nvme/host/apple.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -649,7 +649,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
649649

650650
found = apple_nvme_poll_cq(q, &iob);
651651

652-
if (!rq_list_empty(iob.req_list))
652+
if (!rq_list_empty(&iob.req_list))
653653
apple_nvme_complete_batch(&iob);
654654

655655
return found;

drivers/nvme/host/pci.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -904,7 +904,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
904904
return BLK_STS_OK;
905905
}
906906

907-
static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
907+
static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
908908
{
909909
struct request *req;
910910

@@ -932,11 +932,10 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
932932
return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
933933
}
934934

935-
static void nvme_queue_rqs(struct request **rqlist)
935+
static void nvme_queue_rqs(struct rq_list *rqlist)
936936
{
937-
struct request *submit_list = NULL;
938-
struct request *requeue_list = NULL;
939-
struct request **requeue_lastp = &requeue_list;
937+
struct rq_list submit_list = { };
938+
struct rq_list requeue_list = { };
940939
struct nvme_queue *nvmeq = NULL;
941940
struct request *req;
942941

@@ -946,9 +945,9 @@ static void nvme_queue_rqs(struct request **rqlist)
946945
nvmeq = req->mq_hctx->driver_data;
947946

948947
if (nvme_prep_rq_batch(nvmeq, req))
949-
rq_list_add(&submit_list, req); /* reverse order */
948+
rq_list_add_head(&submit_list, req); /* reverse order */
950949
else
951-
rq_list_add_tail(&requeue_lastp, req);
950+
rq_list_add_tail(&requeue_list, req);
952951
}
953952

954953
if (nvmeq)
@@ -1080,7 +1079,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
10801079
DEFINE_IO_COMP_BATCH(iob);
10811080

10821081
if (nvme_poll_cq(nvmeq, &iob)) {
1083-
if (!rq_list_empty(iob.req_list))
1082+
if (!rq_list_empty(&iob.req_list))
10841083
nvme_pci_complete_batch(&iob);
10851084
return IRQ_HANDLED;
10861085
}

0 commit comments

Comments
 (0)