
Commit beadf00

Christoph Hellwig authored and axboe committed
nvme-pci: reverse request order in nvme_queue_rqs
blk_mq_flush_plug_list submits requests in the reverse order that they were submitted, which leads to a rather suboptimal I/O pattern, especially on rotational devices. Fix this by rewriting nvme_queue_rqs so that it always pops the requests from the passed-in request list and then adds them to the head of a local submit list. This actually simplifies the code a bit, as it removes the complicated list splicing, at the cost of extra updates of the rq_next pointer. As that should be cache hot anyway, it should be an easy price to pay.

Fixes: d62cbcf ("nvme: add support for mq_ops->queue_rqs()")
Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
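To see why this restores the original submission order: the request list handed to nvme_queue_rqs() arrives already reversed (as the commit message notes), and popping from one singly linked list while pushing onto the head of another reverses it once more. The userspace sketch below illustrates that property with a toy list; the struct and helpers are stand-ins for illustration only, not the kernel's struct request or rq_list_* API.

    #include <stdio.h>

    /* Toy stand-in for a singly linked request list (illustration only). */
    struct toy_req {
            int tag;
            struct toy_req *next;
    };

    /* Push onto the head of a list, like building a plug list (LIFO). */
    static void toy_add_head(struct toy_req **list, struct toy_req *req)
    {
            req->next = *list;
            *list = req;
    }

    /* Pop from the head of a list. */
    static struct toy_req *toy_pop(struct toy_req **list)
    {
            struct toy_req *req = *list;

            if (req)
                    *list = req->next;
            return req;
    }

    int main(void)
    {
            struct toy_req reqs[3] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 } };
            struct toy_req *plug = NULL, *submit = NULL, *req;

            /* "Plugging": head insertion leaves the list in reverse order: 2, 1, 0. */
            for (int i = 0; i < 3; i++)
                    toy_add_head(&plug, &reqs[i]);

            /*
             * Pop from the plug list and head-insert into the submit list:
             * the second reversal restores the original order 0, 1, 2.
             */
            while ((req = toy_pop(&plug)))
                    toy_add_head(&submit, req);

            for (req = submit; req; req = req->next)
                    printf("submit tag %d\n", req->tag);
            return 0;
    }

Compiled and run, the sketch prints tags 0, 1, 2 in the order they were originally "submitted", mirroring what rq_list_pop() plus rq_list_add() achieve in the patch.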
1 parent e559ee0 commit beadf00

File tree: 1 file changed (+17, -22 lines)


drivers/nvme/host/pci.c

Lines changed: 17 additions & 22 deletions
@@ -906,9 +906,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
 {
+        struct request *req;
+
         spin_lock(&nvmeq->sq_lock);
-        while (!rq_list_empty(*rqlist)) {
-                struct request *req = rq_list_pop(rqlist);
+        while ((req = rq_list_pop(rqlist))) {
                 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
                 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
@@ -933,31 +934,25 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
 
 static void nvme_queue_rqs(struct request **rqlist)
 {
-        struct request *req, *next, *prev = NULL;
+        struct request *submit_list = NULL;
         struct request *requeue_list = NULL;
+        struct request **requeue_lastp = &requeue_list;
+        struct nvme_queue *nvmeq = NULL;
+        struct request *req;
 
-        rq_list_for_each_safe(rqlist, req, next) {
-                struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-
-                if (!nvme_prep_rq_batch(nvmeq, req)) {
-                        /* detach 'req' and add to remainder list */
-                        rq_list_move(rqlist, &requeue_list, req, prev);
-
-                        req = prev;
-                        if (!req)
-                                continue;
-                }
+        while ((req = rq_list_pop(rqlist))) {
+                if (nvmeq && nvmeq != req->mq_hctx->driver_data)
+                        nvme_submit_cmds(nvmeq, &submit_list);
+                nvmeq = req->mq_hctx->driver_data;
 
-                if (!next || req->mq_hctx != next->mq_hctx) {
-                        /* detach rest of list, and submit */
-                        req->rq_next = NULL;
-                        nvme_submit_cmds(nvmeq, rqlist);
-                        *rqlist = next;
-                        prev = NULL;
-                } else
-                        prev = req;
+                if (nvme_prep_rq_batch(nvmeq, req))
+                        rq_list_add(&submit_list, req); /* reverse order */
+                else
+                        rq_list_add_tail(&requeue_lastp, req);
         }
 
+        if (nvmeq)
+                nvme_submit_cmds(nvmeq, &submit_list);
         *rqlist = requeue_list;
 }
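For reference, this is how nvme_queue_rqs() reads with the patch applied, reconstructed from the hunks above (surrounding file context elided):

    static void nvme_queue_rqs(struct request **rqlist)
    {
            struct request *submit_list = NULL;
            struct request *requeue_list = NULL;
            struct request **requeue_lastp = &requeue_list;
            struct nvme_queue *nvmeq = NULL;
            struct request *req;

            while ((req = rq_list_pop(rqlist))) {
                    /* Flush the batch whenever the target queue changes. */
                    if (nvmeq && nvmeq != req->mq_hctx->driver_data)
                            nvme_submit_cmds(nvmeq, &submit_list);
                    nvmeq = req->mq_hctx->driver_data;

                    if (nvme_prep_rq_batch(nvmeq, req))
                            rq_list_add(&submit_list, req); /* reverse order */
                    else
                            rq_list_add_tail(&requeue_lastp, req);
            }

            if (nvmeq)
                    nvme_submit_cmds(nvmeq, &submit_list);
            *rqlist = requeue_list;
    }

Requests for the same nvme_queue are batched onto submit_list and flushed whenever the queue changes, plus once more after the loop; requests that fail nvme_prep_rq_batch are appended via the requeue_lastp tail pointer so requeue_list keeps their relative order for the caller to requeue.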
