Commit 6d816e0

io_uring: hold 'ctx' reference around task_work queue + execute
We're holding the request reference, but we need to go one higher to
ensure that the ctx remains valid after the request has finished. If
the ring is closed with pending task_work inflight, and the given
io_kiocb finishes sync during issue, then we need a reference to the
ring itself around the task_work execution cycle.

Cc: [email protected] # v5.7+
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent efa8480 commit 6d816e0
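
The rule the diff enforces is symmetric: every site that queues task_work takes a percpu reference on the ring's ctx, and every callback drops it only after the work (including any request teardown) has run, so closing the ring cannot free the ctx under an inflight callback. Below is a minimal userspace sketch of that lifetime rule, with hypothetical stand-ins: a plain atomic counter and a pthread replace percpu_ref and the kernel's task_work machinery, and ctx_get/ctx_put, struct ctx, and struct request are invented names. It illustrates the discipline, not the kernel code.

/* Sketch of the ctx-reference pattern; names are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;		/* stands in for ctx->refs (percpu_ref) */
};

struct request {
	struct ctx *ctx;
	void (*task_work)(struct request *req);
};

static void ctx_get(struct ctx *ctx)
{
	atomic_fetch_add(&ctx->refs, 1);
}

static void ctx_put(struct ctx *ctx)
{
	/* Free on the transition 1 -> 0, like the final percpu_ref put. */
	if (atomic_fetch_sub(&ctx->refs, 1) == 1) {
		printf("ctx freed\n");
		free(ctx);
	}
}

/* Callback side: cache ctx first, since finishing req may drop it. */
static void req_task_work(struct request *req)
{
	struct ctx *ctx = req->ctx;

	printf("task_work runs, req completed\n");
	free(req);		/* request reference is gone here */
	ctx_put(ctx);		/* ref taken at queue time keeps ctx alive */
}

static void *worker(void *arg)
{
	struct request *req = arg;

	req->task_work(req);
	return NULL;
}

/* Queue side: take the ctx reference *before* handing off the work. */
static pthread_t req_task_queue(struct request *req)
{
	pthread_t t;

	req->task_work = req_task_work;
	ctx_get(req->ctx);
	pthread_create(&t, NULL, worker, req);
	return t;
}

int main(void)
{
	struct ctx *ctx = malloc(sizeof(*ctx));
	struct request *req = malloc(sizeof(*req));
	pthread_t t;

	atomic_init(&ctx->refs, 1);	/* the ring's own reference */
	req->ctx = ctx;

	t = req_task_queue(req);
	ctx_put(ctx);	/* "ring closed" while task_work is inflight */
	pthread_join(t, NULL);	/* ctx is freed only after the callback */
	return 0;
}

Compile with "cc -pthread sketch.c". The "ctx freed" line always prints after the worker's put, mirroring how the patch defers ring teardown past the task_work execution cycle.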

File tree

1 file changed: +15 -0 lines changed


fs/io_uring.c

Lines changed: 15 additions & 0 deletions
@@ -1821,15 +1821,18 @@ static void __io_req_task_submit(struct io_kiocb *req)
 static void io_req_task_submit(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 
 	__io_req_task_submit(req);
+	percpu_ref_put(&ctx->refs);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
 	int ret;
 
 	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
 
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
@@ -2318,6 +2321,8 @@ static void io_rw_resubmit(struct callback_head *cb)
 		refcount_inc(&req->refs);
 		io_queue_async_work(req);
 	}
+
+	percpu_ref_put(&ctx->refs);
 }
 #endif
 
@@ -2330,6 +2335,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 		return false;
 
 	init_task_work(&req->task_work, io_rw_resubmit);
+	percpu_ref_get(&req->ctx->refs);
+
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (!ret)
 		return true;
@@ -3033,6 +3040,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	list_del_init(&wait->entry);
 
 	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
+
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	ret = io_req_task_work_add(req, &req->task_work);
@@ -4565,6 +4574,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
 	req->result = mask;
 	init_task_work(&req->task_work, func);
+	percpu_ref_get(&req->ctx->refs);
+
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
 	 * work gets canceled, so just cancel this request as well instead
@@ -4652,11 +4663,13 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 static void io_poll_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt = NULL;
 
 	io_poll_task_handler(req, &nxt);
 	if (nxt)
 		__io_req_task_submit(nxt);
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -4752,6 +4765,7 @@ static void io_async_task_func(struct callback_head *cb)
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		percpu_ref_put(&ctx->refs);
 		return;
 	}
 
@@ -4767,6 +4781,7 @@ static void io_async_task_func(struct callback_head *cb)
 	else
 		__io_req_task_cancel(req, -ECANCELED);
 
+	percpu_ref_put(&ctx->refs);
 	kfree(apoll->double_poll);
 	kfree(apoll);
 }
