Skip to content

Commit 106e4df

Browse files
Peter-Jan Gootzen authored and mstsirkin (Michael S. Tsirkin) committed
virtio-fs: improved request latencies when Virtio queue is full
Currently, when the Virtio queue is full, a work item is scheduled to execute in 1ms that retries adding the request to the queue. This is a large amount of time on the scale on which a virtio-fs device can operate. When using a DPU this is around 30-40us baseline without going to a remote server (4k, QD=1). This patch changes the retrying behavior to immediately filling the Virtio queue up again when a completion has been received. This reduces the 99.9th percentile latencies in our tests by 60x and slightly increases the overall throughput, when using a workload IO depth 2x the size of the Virtio queue and a DPU-powered virtio-fs device (NVIDIA BlueField DPU). Signed-off-by: Peter-Jan Gootzen <[email protected]> Reviewed-by: Max Gurtovoy <[email protected]> Reviewed-by: Yoray Zack <[email protected]> Message-Id: <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Stefan Hajnoczi <[email protected]>
1 parent 2106e1f commit 106e4df

File tree

1 file changed

+19
-15
lines changed

1 file changed

+19
-15
lines changed

fs/fuse/virtio_fs.c

Lines changed: 19 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ struct virtio_fs_vq {
5151
struct work_struct done_work;
5252
struct list_head queued_reqs;
5353
struct list_head end_reqs; /* End these requests */
54-
struct delayed_work dispatch_work;
54+
struct work_struct dispatch_work;
5555
struct fuse_dev *fud;
5656
bool connected;
5757
long in_flight;
@@ -233,7 +233,7 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
233233
}
234234

235235
flush_work(&fsvq->done_work);
236-
flush_delayed_work(&fsvq->dispatch_work);
236+
flush_work(&fsvq->dispatch_work);
237237
}
238238

239239
static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
@@ -408,14 +408,18 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
408408
dec_in_flight_req(fsvq);
409409
}
410410
} while (!virtqueue_enable_cb(vq));
411+
412+
if (!list_empty(&fsvq->queued_reqs))
413+
schedule_work(&fsvq->dispatch_work);
414+
411415
spin_unlock(&fsvq->lock);
412416
}
413417

414418
static void virtio_fs_request_dispatch_work(struct work_struct *work)
415419
{
416420
struct fuse_req *req;
417421
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
418-
dispatch_work.work);
422+
dispatch_work);
419423
int ret;
420424

421425
pr_debug("virtio-fs: worker %s called.\n", __func__);
@@ -450,8 +454,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
450454
if (ret == -ENOSPC) {
451455
spin_lock(&fsvq->lock);
452456
list_add_tail(&req->list, &fsvq->queued_reqs);
453-
schedule_delayed_work(&fsvq->dispatch_work,
454-
msecs_to_jiffies(1));
455457
spin_unlock(&fsvq->lock);
456458
return;
457459
}
@@ -498,8 +500,6 @@ static int send_forget_request(struct virtio_fs_vq *fsvq,
498500
pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
499501
ret);
500502
list_add_tail(&forget->list, &fsvq->queued_reqs);
501-
schedule_delayed_work(&fsvq->dispatch_work,
502-
msecs_to_jiffies(1));
503503
if (!in_flight)
504504
inc_in_flight_req(fsvq);
505505
/* Queue is full */
@@ -531,7 +531,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
531531
{
532532
struct virtio_fs_forget *forget;
533533
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
534-
dispatch_work.work);
534+
dispatch_work);
535535
pr_debug("virtio-fs: worker %s called.\n", __func__);
536536
while (1) {
537537
spin_lock(&fsvq->lock);
@@ -709,6 +709,12 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
709709
virtio_fs_request_complete(req, fsvq);
710710
}
711711
}
712+
713+
/* Try to push previously queued requests, as the queue might no longer be full */
714+
spin_lock(&fsvq->lock);
715+
if (!list_empty(&fsvq->queued_reqs))
716+
schedule_work(&fsvq->dispatch_work);
717+
spin_unlock(&fsvq->lock);
712718
}
713719

714720
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
@@ -770,12 +776,12 @@ static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
770776

771777
if (vq_type == VQ_REQUEST) {
772778
INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
773-
INIT_DELAYED_WORK(&fsvq->dispatch_work,
774-
virtio_fs_request_dispatch_work);
779+
INIT_WORK(&fsvq->dispatch_work,
780+
virtio_fs_request_dispatch_work);
775781
} else {
776782
INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
777-
INIT_DELAYED_WORK(&fsvq->dispatch_work,
778-
virtio_fs_hiprio_dispatch_work);
783+
INIT_WORK(&fsvq->dispatch_work,
784+
virtio_fs_hiprio_dispatch_work);
779785
}
780786
}
781787

@@ -1375,8 +1381,6 @@ __releases(fiq->lock)
13751381
spin_lock(&fsvq->lock);
13761382
list_add_tail(&req->list, &fsvq->queued_reqs);
13771383
inc_in_flight_req(fsvq);
1378-
schedule_delayed_work(&fsvq->dispatch_work,
1379-
msecs_to_jiffies(1));
13801384
spin_unlock(&fsvq->lock);
13811385
return;
13821386
}
@@ -1386,7 +1390,7 @@ __releases(fiq->lock)
13861390
/* Can't end request in submission context. Use a worker */
13871391
spin_lock(&fsvq->lock);
13881392
list_add_tail(&req->list, &fsvq->end_reqs);
1389-
schedule_delayed_work(&fsvq->dispatch_work, 0);
1393+
schedule_work(&fsvq->dispatch_work);
13901394
spin_unlock(&fsvq->lock);
13911395
return;
13921396
}

0 commit comments

Comments (0)