@@ -1995,7 +1995,7 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 	return __io_req_find_next(req);
 }
 
-static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
+static int io_req_task_work_add(struct io_kiocb *req)
 {
 	struct task_struct *tsk = req->task;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2012,7 +2012,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
 	 * will do the job.
 	 */
 	notify = TWA_NONE;
-	if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
+	if (!(ctx->flags & IORING_SETUP_SQPOLL))
 		notify = TWA_SIGNAL;
 
 	ret = task_work_add(tsk, &req->task_work, notify);
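
Taken together, the two hunks above leave io_req_task_work_add() with a single decision: notify with TWA_SIGNAL unless the ring runs in SQPOLL mode, where the dedicated poll thread makes plain queueing sufficient. A sketch of how the function plausibly reads after this change; the notify/ret declarations, the comment wording, and the success-path wakeup are filled in from surrounding context and are assumptions, not part of this diff:

static int io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_ring_ctx *ctx = req->ctx;
	enum task_work_notify_mode notify;
	int ret;

	/*
	 * The SQPOLL kernel thread polls on its own, so plain queueing
	 * (TWA_NONE) plus a wakeup is enough; everyone else gets
	 * TWA_SIGNAL so the target task is interrupted and runs the
	 * queued work promptly.
	 */
	notify = TWA_NONE;
	if (!(ctx->flags & IORING_SETUP_SQPOLL))
		notify = TWA_SIGNAL;

	ret = task_work_add(tsk, &req->task_work, notify);
	if (!ret)
		wake_up_process(tsk);	/* assumed success path: kick the task */
	return ret;
}
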
@@ -2074,7 +2074,7 @@ static void io_req_task_queue(struct io_kiocb *req)
 	init_task_work(&req->task_work, io_req_task_submit);
 	percpu_ref_get(&req->ctx->refs);
 
-	ret = io_req_task_work_add(req, true);
+	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
 
@@ -2196,7 +2196,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
 	int ret;
 
 	init_task_work(&req->task_work, io_put_req_deferred_cb);
-	ret = io_req_task_work_add(req, true);
+	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
 
@@ -3305,7 +3305,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
-	ret = io_req_task_work_add(req, true);
+	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
 
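
Each converted caller above follows the same fallback shape: if io_req_task_work_add() fails (the target task is exiting, as the comment in the final hunk notes), the work is punted to the io-wq manager task instead. The body of the `if (unlikely(ret))` branch is truncated by every hunk; a sketch of the common pattern, assuming the io_wq_get_task() helper of this era, and noting that some call sites first repoint req->task_work at a cancel handler:

	ret = io_req_task_work_add(req);
	if (unlikely(ret)) {
		struct task_struct *tsk;

		/* Task is exiting: hand off to the io-wq manager task. */
		tsk = io_wq_get_task(req->ctx->io_wq);
		task_work_add(tsk, &req->task_work, TWA_NONE);
		wake_up_process(tsk);	/* TWA_NONE needs an explicit wake */
	}
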
@@ -4843,7 +4843,6 @@ struct io_poll_table {
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 			   __poll_t mask, task_work_func_t func)
 {
-	bool twa_signal_ok;
 	int ret;
 
 	/* for instances that support it check for an event match first: */
@@ -4858,21 +4857,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	init_task_work(&req->task_work, func);
 	percpu_ref_get(&req->ctx->refs);
 
-	/*
-	 * If we using the signalfd wait_queue_head for this wakeup, then
-	 * it's not safe to use TWA_SIGNAL as we could be recursing on the
-	 * tsk->sighand->siglock on doing the wakeup. Should not be needed
-	 * either, as the normal wakeup will suffice.
-	 */
-	twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
-
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
 	 * work gets canceled, so just cancel this request as well instead
 	 * of executing it. We can't safely execute it anyway, as we may not
 	 * have the needed state needed for it anyway.
 	 */
-	ret = io_req_task_work_add(req, twa_signal_ok);
+	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
 		struct task_struct *tsk;
 