@@ -1821,15 +1821,18 @@ static void __io_req_task_submit(struct io_kiocb *req)
 static void io_req_task_submit(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 
 	__io_req_task_submit(req);
+	percpu_ref_put(&ctx->refs);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
 	int ret;
 
 	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
 
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
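The pattern in this hunk repeats through the rest of the patch: every path that queues task_work now takes a percpu reference on the ring ctx, and the matching callback drops it once it has run, so the ctx cannot be torn down while work is still in flight. For background, a minimal sketch of the percpu_ref lifetime rules this relies on (my_ctx and friends are hypothetical names, not from this file):

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/gfp.h>

struct my_ctx {
	struct percpu_ref refs;
	struct completion done;
};

/* Runs once the ref has been killed and the last put has happened. */
static void my_ctx_ref_release(struct percpu_ref *ref)
{
	struct my_ctx *ctx = container_of(ref, struct my_ctx, refs);

	complete(&ctx->done);
}

static int my_ctx_setup(struct my_ctx *ctx)
{
	init_completion(&ctx->done);
	return percpu_ref_init(&ctx->refs, my_ctx_ref_release, 0, GFP_KERNEL);
}

static void my_ctx_teardown(struct my_ctx *ctx)
{
	percpu_ref_kill(&ctx->refs);		/* refuse new references */
	wait_for_completion(&ctx->done);	/* wait for outstanding puts */
	percpu_ref_exit(&ctx->refs);
}

Because each pending task_work item now holds such a reference, the ring teardown path (which kills ctx->refs and waits for it to drain) cannot finish while a queued callback has yet to run.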
@@ -2318,6 +2321,8 @@ static void io_rw_resubmit(struct callback_head *cb)
 		refcount_inc(&req->refs);
 		io_queue_async_work(req);
 	}
+
+	percpu_ref_put(&ctx->refs);
 }
 #endif
 
@@ -2330,6 +2335,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 		return false;
 
 	init_task_work(&req->task_work, io_rw_resubmit);
+	percpu_ref_get(&req->ctx->refs);
+
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (!ret)
 		return true;
@@ -3033,6 +3040,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	list_del_init(&wait->entry);
 
 	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
+
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	ret = io_req_task_work_add(req, &req->task_work);
@@ -4565,6 +4574,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
 	req->result = mask;
 	init_task_work(&req->task_work, func);
+	percpu_ref_get(&req->ctx->refs);
+
 	/*
	 * If this fails, then the task is exiting. When a task exits, the
	 * work gets canceled, so just cancel this request as well instead
@@ -4652,11 +4663,13 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 static void io_poll_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt = NULL;
 
 	io_poll_task_handler(req, &nxt);
 	if (nxt)
 		__io_req_task_submit(nxt);
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
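Note why io_poll_task_func() above caches req->ctx in a local before doing any work: io_poll_task_handler() may drop the last reference to the request and free it, after which req->ctx must not be dereferenced. For contrast, a hypothetical broken version of the same callback:

static void broken_poll_task_func(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_kiocb *nxt = NULL;

	io_poll_task_handler(req, &nxt);	/* may drop the last req ref */
	if (nxt)
		__io_req_task_submit(nxt);
	percpu_ref_put(&req->ctx->refs);	/* use-after-free: req may be gone */
}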
@@ -4752,6 +4765,7 @@ static void io_async_task_func(struct callback_head *cb)
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		percpu_ref_put(&ctx->refs);
 		return;
 	}
 
@@ -4767,6 +4781,7 @@ static void io_async_task_func(struct callback_head *cb)
 	else
 		__io_req_task_cancel(req, -ECANCELED);
 
+	percpu_ref_put(&ctx->refs);
 	kfree(apoll->double_poll);
 	kfree(apoll);
 }
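The last two hunks cover the subtle case: a callback queued with percpu_ref_get() must drop the reference exactly once on every exit path, and io_async_task_func() returns early when io_poll_rewait() re-arms the poll, so that path needs its own put. One way to keep the pairing in a single place, shown here only as an illustrative sketch (early_exit() is a hypothetical predicate; the patch itself does not restructure the function this way):

static void example_task_func(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;	/* save before req can be freed */

	if (early_exit(req))
		goto out;

	/* ... normal completion work, which may free req ... */
out:
	percpu_ref_put(&ctx->refs);	/* pairs with the get at queue time */
}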