
Commit 7f212e9

Christoph Hellwig authored and axboe committed
virtio_blk: reverse request order in virtio_queue_rqs
blk_mq_flush_plug_list submits requests in the reverse order that they were submitted, which leads to a rather suboptimal I/O pattern, especially on rotational devices. Fix this by rewriting virtio_queue_rqs so that it always pops the requests from the passed-in request list and then adds them to the head of a local submit list. This actually simplifies the code a bit, as it removes the complicated list splicing, at the cost of extra updates of the rq_next pointer. As that should be cache hot anyway, it should be an easy price to pay.

Fixes: 0e9911f ("virtio-blk: support mq_ops->queue_rqs()")
Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
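The core of the rewrite is a simple property of singly linked lists: popping entries off the head of one list and pushing them onto the head of another reverses their order, so the local submit list ends up back in the order the requests were originally queued. Below is a minimal userspace sketch of that pattern only; the struct node type and the list_pop/list_push helpers are invented for illustration and are not the kernel's struct request or rq_list API.

/*
 * Minimal userspace sketch (not kernel code): popping nodes off the head
 * of one singly linked list and pushing them onto the head of another
 * reverses their order.
 */
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

/* Pop the first node off *list, or return NULL if the list is empty. */
static struct node *list_pop(struct node **list)
{
        struct node *n = *list;

        if (n)
                *list = n->next;
        return n;
}

/* Push a node onto the head of *list. */
static void list_push(struct node **list, struct node *n)
{
        n->next = *list;
        *list = n;
}

int main(void)
{
        struct node a = { 1 }, b = { 2 }, c = { 3 };
        struct node *plugged = NULL, *submit = NULL, *n;

        /* Plugging reversed the submission order: the list reads 3, 2, 1. */
        list_push(&plugged, &a);
        list_push(&plugged, &b);
        list_push(&plugged, &c);

        /* Pop from one head, push onto the other: the order flips back. */
        while ((n = list_pop(&plugged)))
                list_push(&submit, n);

        for (n = submit; n; n = n->next)
                printf("%d ", n->id);   /* prints: 1 2 3 */
        printf("\n");
        return 0;
}

In the actual patch, rq_list_pop() and rq_list_add() play these roles, and virtblk_add_req_batch() then consumes the submit list under the virtqueue lock.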
1 parent: beadf00

1 file changed: +21 −25 lines

drivers/block/virtio_blk.c

@@ -471,18 +471,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
 	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
 }
 
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
 		struct request **rqlist)
 {
+	struct request *req;
 	unsigned long flags;
-	int err;
 	bool kick;
 
 	spin_lock_irqsave(&vq->lock, flags);
 
-	while (!rq_list_empty(*rqlist)) {
-		struct request *req = rq_list_pop(rqlist);
+	while ((req = rq_list_pop(rqlist))) {
 		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+		int err;
 
 		err = virtblk_add_req(vq->vq, vbr);
 		if (err) {
@@ -495,37 +495,33 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
 	kick = virtqueue_kick_prepare(vq->vq);
 	spin_unlock_irqrestore(&vq->lock, flags);
 
-	return kick;
+	if (kick)
+		virtqueue_notify(vq->vq);
 }
 
 static void virtio_queue_rqs(struct request **rqlist)
 {
-	struct request *req, *next, *prev = NULL;
+	struct request *submit_list = NULL;
 	struct request *requeue_list = NULL;
+	struct request **requeue_lastp = &requeue_list;
+	struct virtio_blk_vq *vq = NULL;
+	struct request *req;
 
-	rq_list_for_each_safe(rqlist, req, next) {
-		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
-		bool kick;
-
-		if (!virtblk_prep_rq_batch(req)) {
-			rq_list_move(rqlist, &requeue_list, req, prev);
-			req = prev;
-			if (!req)
-				continue;
-		}
+	while ((req = rq_list_pop(rqlist))) {
+		struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
 
-		if (!next || req->mq_hctx != next->mq_hctx) {
-			req->rq_next = NULL;
-			kick = virtblk_add_req_batch(vq, rqlist);
-			if (kick)
-				virtqueue_notify(vq->vq);
+		if (vq && vq != this_vq)
+			virtblk_add_req_batch(vq, &submit_list);
+		vq = this_vq;
 
-			*rqlist = next;
-			prev = NULL;
-		} else
-			prev = req;
+		if (virtblk_prep_rq_batch(req))
+			rq_list_add(&submit_list, req); /* reverse order */
+		else
+			rq_list_add_tail(&requeue_lastp, req);
 	}
 
+	if (vq)
+		virtblk_add_req_batch(vq, &submit_list);
 	*rqlist = requeue_list;
 }
