Skip to content

Commit 039a2e8

Browse files
committed
io_uring/rw: reinstate thread check for retries
Allowing retries for everything is arguably the right thing to do, now that every command type is async read from the start. But it has exposed a few issues around a missing check for a retry (which commit cca6571 exposed), and the fixup commit for that isn't necessarily 100% sound in terms of iov_iter state. For now, just revert these two commits. This unfortunately re-opens the fact that -EAGAIN can get bubbled up to userspace in some cases where the kernel could very well just sanely retry them. But until we have all the conditions around that covered, we cannot safely enable it. This reverts commit df604d2. This reverts commit cca6571. Signed-off-by: Jens Axboe <[email protected]>
1 parent 6fe4220 commit 039a2e8

File tree

3 files changed

+29
-25
lines changed

3 files changed

+29
-25
lines changed

io_uring/io_uring.c

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -527,19 +527,6 @@ static void io_queue_iowq(struct io_kiocb *req)
527527
io_queue_linked_timeout(link);
528528
}
529529

530-
static void io_tw_requeue_iowq(struct io_kiocb *req, struct io_tw_state *ts)
531-
{
532-
req->flags &= ~REQ_F_REISSUE;
533-
io_queue_iowq(req);
534-
}
535-
536-
void io_tw_queue_iowq(struct io_kiocb *req)
537-
{
538-
req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
539-
req->io_task_work.func = io_tw_requeue_iowq;
540-
io_req_task_work_add(req);
541-
}
542-
543530
static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
544531
{
545532
while (!list_empty(&ctx->defer_list)) {

io_uring/io_uring.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
7575
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
7676
bool io_alloc_async_data(struct io_kiocb *req);
7777
void io_req_task_queue(struct io_kiocb *req);
78-
void io_tw_queue_iowq(struct io_kiocb *req);
7978
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
8079
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
8180
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);

io_uring/rw.c

Lines changed: 29 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -396,9 +396,16 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
396396
return NULL;
397397
}
398398

399+
#ifdef CONFIG_BLOCK
400+
static void io_resubmit_prep(struct io_kiocb *req)
401+
{
402+
struct io_async_rw *io = req->async_data;
403+
404+
iov_iter_restore(&io->iter, &io->iter_state);
405+
}
406+
399407
static bool io_rw_should_reissue(struct io_kiocb *req)
400408
{
401-
#ifdef CONFIG_BLOCK
402409
umode_t mode = file_inode(req->file)->i_mode;
403410
struct io_ring_ctx *ctx = req->ctx;
404411

@@ -414,11 +421,23 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
414421
*/
415422
if (percpu_ref_is_dying(&ctx->refs))
416423
return false;
424+
/*
425+
* Play it safe and assume not safe to re-import and reissue if we're
426+
* not in the original thread group (or in task context).
427+
*/
428+
if (!same_thread_group(req->task, current) || !in_task())
429+
return false;
417430
return true;
431+
}
418432
#else
433+
static void io_resubmit_prep(struct io_kiocb *req)
434+
{
435+
}
436+
static bool io_rw_should_reissue(struct io_kiocb *req)
437+
{
419438
return false;
420-
#endif
421439
}
440+
#endif
422441

423442
static void io_req_end_write(struct io_kiocb *req)
424443
{
@@ -455,7 +474,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
455474
* current cycle.
456475
*/
457476
io_req_io_end(req);
458-
io_tw_queue_iowq(req);
477+
req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
459478
return true;
460479
}
461480
req_set_fail(req);
@@ -521,7 +540,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
521540
io_req_end_write(req);
522541
if (unlikely(res != req->cqe.res)) {
523542
if (res == -EAGAIN && io_rw_should_reissue(req)) {
524-
io_tw_queue_iowq(req);
543+
req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
525544
return;
526545
}
527546
req->cqe.res = res;
@@ -583,10 +602,8 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
583602
}
584603

585604
if (req->flags & REQ_F_REISSUE) {
586-
struct io_async_rw *io = req->async_data;
587-
588605
req->flags &= ~REQ_F_REISSUE;
589-
iov_iter_restore(&io->iter, &io->iter_state);
606+
io_resubmit_prep(req);
590607
return -EAGAIN;
591608
}
592609
return IOU_ISSUE_SKIP_COMPLETE;
@@ -839,8 +856,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
839856
ret = io_iter_do_read(rw, &io->iter);
840857

841858
if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
842-
if (req->flags & REQ_F_REISSUE)
843-
return IOU_ISSUE_SKIP_COMPLETE;
859+
req->flags &= ~REQ_F_REISSUE;
844860
/* If we can poll, just do that. */
845861
if (io_file_can_poll(req))
846862
return -EAGAIN;
@@ -1035,8 +1051,10 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
10351051
else
10361052
ret2 = -EINVAL;
10371053

1038-
if (req->flags & REQ_F_REISSUE)
1039-
return IOU_ISSUE_SKIP_COMPLETE;
1054+
if (req->flags & REQ_F_REISSUE) {
1055+
req->flags &= ~REQ_F_REISSUE;
1056+
ret2 = -EAGAIN;
1057+
}
10401058

10411059
/*
10421060
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just

0 commit comments

Comments
 (0)