Skip to content

Commit 915b3dd

Browse files
Hao Xu authored and Jens Axboe (axboe) committed
io_uring: spin in iopoll() only when reqs are in a single queue
We currently spin in iopoll() when the requests to be iopolled are for the same file (device), but one device may have multiple hardware queues. For example, suppose hw_queue_0 holds req(30us) and hw_queue_1 holds req(10us). If we first spin on iopolling hw_queue_0, the average latency would be (30us + 30us) / 2 = 30us, while if we round-robin between the queues, the average latency would be (30us + 10us) / 2 = 20us, since we reap the request in hw_queue_1 in time. So it is better to spin only when the requests are in the same hardware queue. Signed-off-by: Hao Xu <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 99ebe4e commit 915b3dd

File tree

1 file changed

+14
-6
lines changed

1 file changed

+14
-6
lines changed

fs/io_uring.c

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -434,7 +434,7 @@ struct io_ring_ctx {
434434
struct list_head iopoll_list;
435435
struct hlist_head *cancel_hash;
436436
unsigned cancel_hash_bits;
437-
bool poll_multi_file;
437+
bool poll_multi_queue;
438438
} ____cacheline_aligned_in_smp;
439439

440440
struct io_restriction restrictions;
@@ -2314,7 +2314,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
23142314
* Only spin for completions if we don't have multiple devices hanging
23152315
* off our complete list, and we're under the requested amount.
23162316
*/
2317-
spin = !ctx->poll_multi_file && *nr_events < min;
2317+
spin = !ctx->poll_multi_queue && *nr_events < min;
23182318

23192319
ret = 0;
23202320
list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
@@ -2553,14 +2553,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
25532553
* different devices.
25542554
*/
25552555
if (list_empty(&ctx->iopoll_list)) {
2556-
ctx->poll_multi_file = false;
2557-
} else if (!ctx->poll_multi_file) {
2556+
ctx->poll_multi_queue = false;
2557+
} else if (!ctx->poll_multi_queue) {
25582558
struct io_kiocb *list_req;
2559+
unsigned int queue_num0, queue_num1;
25592560

25602561
list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
25612562
inflight_entry);
2562-
if (list_req->file != req->file)
2563-
ctx->poll_multi_file = true;
2563+
2564+
if (list_req->file != req->file) {
2565+
ctx->poll_multi_queue = true;
2566+
} else {
2567+
queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2568+
queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2569+
if (queue_num0 != queue_num1)
2570+
ctx->poll_multi_queue = true;
2571+
}
25642572
}
25652573

25662574
/*

0 commit comments

Comments
 (0)