Commit d803d12

io_uring/rw: handle -EAGAIN retry at IO completion time
Rather than try and have io_read/io_write turn REQ_F_REISSUE into -EAGAIN, catch the REQ_F_REISSUE when the request is otherwise considered as done. This is saner as we know this isn't happening during an actual submission, and it removes the need to randomly check REQ_F_REISSUE after read/write submission.

If REQ_F_REISSUE is set, __io_submit_flush_completions() will skip over this request in terms of posting a CQE, and the regular request cleaning will ensure that it gets reissued via io-wq.

Signed-off-by: Jens Axboe <[email protected]>
Parent: 9ac273a
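
Not part of the commit itself: the toy program below is a minimal userspace sketch of the retry pattern the commit message describes, so the interplay of the three pieces (mark for reissue at completion, skip the CQE during flush, requeue during cleanup) is easier to follow. All names in it (struct request, complete_rw, flush_completions, free_batch, the F_* flags) are invented for illustration and only loosely mirror the real io_uring code (__io_complete_rw_common(), __io_submit_flush_completions(), io_free_batch_list()).

#include <stdio.h>

#define F_REISSUE  (1u << 0)
#define F_CQE_SKIP (1u << 1)

struct request {
        unsigned int flags;
        int res;
};

/* Completion side: on -EAGAIN, mark the request for reissue instead of
 * turning it into an error; otherwise record the result. */
static void complete_rw(struct request *req, int res)
{
        if (res == -11 /* -EAGAIN */) {
                req->flags |= F_REISSUE;
                return;
        }
        req->res = res;
}

/* Flush side: post a "CQE" for everything not marked skip or reissue. */
static void flush_completions(struct request *reqs, int nr)
{
        for (int i = 0; i < nr; i++) {
                if (reqs[i].flags & (F_CQE_SKIP | F_REISSUE))
                        continue;
                printf("post CQE for req %d: res=%d\n", i, reqs[i].res);
        }
}

/* Cleanup side: requeue anything marked for reissue to a worker. */
static void free_batch(struct request *reqs, int nr)
{
        for (int i = 0; i < nr; i++) {
                if (reqs[i].flags & F_REISSUE) {
                        reqs[i].flags &= ~F_REISSUE;
                        printf("requeue req %d to worker\n", i);
                        continue;
                }
                /* normal request teardown would happen here */
        }
}

int main(void)
{
        struct request reqs[2] = { { 0, 0 }, { 0, 0 } };

        complete_rw(&reqs[0], 4096);   /* finished normally */
        complete_rw(&reqs[1], -11);    /* hit -EAGAIN, needs a retry */

        flush_completions(reqs, 2);    /* only req 0 posts a completion */
        free_batch(reqs, 2);           /* req 1 is handed to the worker */
        return 0;
}

The key design point mirrored here is that the submission/completion fast path never has to translate the reissue condition back into -EAGAIN; the flag alone carries the request into the slow cleanup path, which does the requeue.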

2 files changed: 38 insertions(+), 57 deletions(-)
io_uring/io_uring.c

Lines changed: 13 additions & 2 deletions
@@ -115,7 +115,7 @@
                                        REQ_F_ASYNC_DATA)
 
 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
-                                IO_REQ_CLEAN_FLAGS)
+                                REQ_F_REISSUE | IO_REQ_CLEAN_FLAGS)
 
 #define IO_TCTX_REFS_CACHE_NR  (1U << 10)
 
@@ -1403,6 +1403,12 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
                                                    comp_list);
 
                if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
+                       if (req->flags & REQ_F_REISSUE) {
+                               node = req->comp_list.next;
+                               req->flags &= ~REQ_F_REISSUE;
+                               io_queue_iowq(req);
+                               continue;
+                       }
                        if (req->flags & REQ_F_REFCOUNT) {
                                node = req->comp_list.next;
                                if (!req_ref_put_and_test(req))
@@ -1442,7 +1448,12 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    comp_list);
 
-               if (!(req->flags & REQ_F_CQE_SKIP) &&
+               /*
+                * Requests marked with REQUEUE should not post a CQE, they
+                * will go through the io-wq retry machinery and post one
+                * later.
+                */
+               if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
                    unlikely(!io_fill_cqe_req(ctx, req))) {
                        if (ctx->lockless_cq) {
                                spin_lock(&ctx->completion_lock);

io_uring/rw.c

Lines changed: 25 additions & 55 deletions
@@ -202,7 +202,7 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
         * mean that the underlying data can be gone at any time. But that
         * should be fixed seperately, and then this check could be killed.
         */
-       if (!(req->flags & REQ_F_REFCOUNT)) {
+       if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
                req->flags &= ~REQ_F_NEED_CLEANUP;
                io_rw_recycle(req, issue_flags);
        }
@@ -455,19 +455,12 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
        return NULL;
 }
 
-#ifdef CONFIG_BLOCK
-static void io_resubmit_prep(struct io_kiocb *req)
-{
-       struct io_async_rw *io = req->async_data;
-       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-       io_meta_restore(io, &rw->kiocb);
-       iov_iter_restore(&io->iter, &io->iter_state);
-}
-
 static bool io_rw_should_reissue(struct io_kiocb *req)
 {
+#ifdef CONFIG_BLOCK
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        umode_t mode = file_inode(req->file)->i_mode;
+       struct io_async_rw *io = req->async_data;
        struct io_ring_ctx *ctx = req->ctx;
 
        if (!S_ISBLK(mode) && !S_ISREG(mode))
@@ -488,17 +481,14 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
         */
        if (!same_thread_group(req->tctx->task, current) || !in_task())
                return false;
+
+       io_meta_restore(io, &rw->kiocb);
+       iov_iter_restore(&io->iter, &io->iter_state);
        return true;
-}
 #else
-static void io_resubmit_prep(struct io_kiocb *req)
-{
-}
-static bool io_rw_should_reissue(struct io_kiocb *req)
-{
        return false;
-}
 #endif
+}
 
 static void io_req_end_write(struct io_kiocb *req)
 {
@@ -525,22 +515,16 @@ static void io_req_io_end(struct io_kiocb *req)
        }
 }
 
-static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+static void __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-       if (unlikely(res != req->cqe.res)) {
-               if (res == -EAGAIN && io_rw_should_reissue(req)) {
-                       /*
-                        * Reissue will start accounting again, finish the
-                        * current cycle.
-                        */
-                       io_req_io_end(req);
-                       req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
-                       return true;
-               }
+       if (res == req->cqe.res)
+               return;
+       if (res == -EAGAIN && io_rw_should_reissue(req)) {
+               req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+       } else {
                req_set_fail(req);
                req->cqe.res = res;
        }
-       return false;
 }
 
 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
@@ -583,8 +567,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
        struct io_kiocb *req = cmd_to_io_kiocb(rw);
 
        if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
-               if (__io_complete_rw_common(req, res))
-                       return;
+               __io_complete_rw_common(req, res);
                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }
        req->io_task_work.func = io_req_rw_complete;
@@ -646,26 +629,19 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
-               if (!__io_complete_rw_common(req, ret)) {
-                       /*
-                        * Safe to call io_end from here as we're inline
-                        * from the submission path.
-                        */
-                       io_req_io_end(req);
-                       io_req_set_res(req, final_ret,
-                                      io_put_kbuf(req, ret, issue_flags));
-                       io_req_rw_cleanup(req, issue_flags);
-                       return IOU_OK;
-               }
+               __io_complete_rw_common(req, ret);
+               /*
+                * Safe to call io_end from here as we're inline
+                * from the submission path.
+                */
+               io_req_io_end(req);
+               io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
+               io_req_rw_cleanup(req, issue_flags);
+               return IOU_OK;
        } else {
                io_rw_done(&rw->kiocb, ret);
        }
 
-       if (req->flags & REQ_F_REISSUE) {
-               req->flags &= ~REQ_F_REISSUE;
-               io_resubmit_prep(req);
-               return -EAGAIN;
-       }
        return IOU_ISSUE_SKIP_COMPLETE;
 }
 
@@ -944,8 +920,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
        if (ret == -EOPNOTSUPP && force_nonblock)
                ret = -EAGAIN;
 
-       if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
-               req->flags &= ~REQ_F_REISSUE;
+       if (ret == -EAGAIN) {
                /* If we can poll, just do that. */
                if (io_file_can_poll(req))
                        return -EAGAIN;
@@ -1154,11 +1129,6 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
-       if (req->flags & REQ_F_REISSUE) {
-               req->flags &= ~REQ_F_REISSUE;
-               ret2 = -EAGAIN;
-       }
-
        /*
        * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
        * retry them without IOCB_NOWAIT.