Skip to content

Commit 7891293

Browse files
committed
io_uring: be consistent in assigning next work from handler
If we pass back dependent work in the case of links, we must always ensure that we call the link setup and work prep handler. If not, we might miss some setup for the next work item.

Signed-off-by: Jens Axboe <axboe@kernel.dk>  [email reconstructed from public kernel commit record; scraper had redacted it]
1 parent e0bbb34 commit 7891293

File tree

1 file changed

+28
-24
lines changed

1 file changed

+28
-24
lines changed

fs/io_uring.c

Lines changed: 28 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -2037,6 +2037,28 @@ static bool io_req_cancelled(struct io_kiocb *req)
20372037
return false;
20382038
}
20392039

2040+
static void io_link_work_cb(struct io_wq_work **workptr)
2041+
{
2042+
struct io_wq_work *work = *workptr;
2043+
struct io_kiocb *link = work->data;
2044+
2045+
io_queue_linked_timeout(link);
2046+
work->func = io_wq_submit_work;
2047+
}
2048+
2049+
static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
2050+
{
2051+
struct io_kiocb *link;
2052+
2053+
io_prep_async_work(nxt, &link);
2054+
*workptr = &nxt->work;
2055+
if (link) {
2056+
nxt->work.flags |= IO_WQ_WORK_CB;
2057+
nxt->work.func = io_link_work_cb;
2058+
nxt->work.data = link;
2059+
}
2060+
}
2061+
20402062
static void io_fsync_finish(struct io_wq_work **workptr)
20412063
{
20422064
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
@@ -2055,7 +2077,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
20552077
io_cqring_add_event(req, ret);
20562078
io_put_req_find_next(req, &nxt);
20572079
if (nxt)
2058-
*workptr = &nxt->work;
2080+
io_wq_assign_next(workptr, nxt);
20592081
}
20602082

20612083
static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2111,7 +2133,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
21112133
io_cqring_add_event(req, ret);
21122134
io_put_req_find_next(req, &nxt);
21132135
if (nxt)
2114-
*workptr = &nxt->work;
2136+
io_wq_assign_next(workptr, nxt);
21152137
}
21162138

21172139
static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2377,7 +2399,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
23772399
return;
23782400
__io_accept(req, &nxt, false);
23792401
if (nxt)
2380-
*workptr = &nxt->work;
2402+
io_wq_assign_next(workptr, nxt);
23812403
}
23822404
#endif
23832405

@@ -2608,7 +2630,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
26082630
req_set_fail_links(req);
26092631
io_put_req_find_next(req, &nxt);
26102632
if (nxt)
2611-
*workptr = &nxt->work;
2633+
io_wq_assign_next(workptr, nxt);
26122634
}
26132635

26142636
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -3271,15 +3293,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
32713293
return 0;
32723294
}
32733295

3274-
static void io_link_work_cb(struct io_wq_work **workptr)
3275-
{
3276-
struct io_wq_work *work = *workptr;
3277-
struct io_kiocb *link = work->data;
3278-
3279-
io_queue_linked_timeout(link);
3280-
work->func = io_wq_submit_work;
3281-
}
3282-
32833296
static void io_wq_submit_work(struct io_wq_work **workptr)
32843297
{
32853298
struct io_wq_work *work = *workptr;
@@ -3316,17 +3329,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
33163329
}
33173330

33183331
/* if a dependent link is ready, pass it back */
3319-
if (!ret && nxt) {
3320-
struct io_kiocb *link;
3321-
3322-
io_prep_async_work(nxt, &link);
3323-
*workptr = &nxt->work;
3324-
if (link) {
3325-
nxt->work.flags |= IO_WQ_WORK_CB;
3326-
nxt->work.func = io_link_work_cb;
3327-
nxt->work.data = link;
3328-
}
3329-
}
3332+
if (!ret && nxt)
3333+
io_wq_assign_next(workptr, nxt);
33303334
}
33313335

33323336
static bool io_req_op_valid(int op)

0 commit comments

Comments
 (0)