Skip to content

Commit 355fb9e

Browse files
committed
io_uring: remove 'twa_signal_ok' deadlock work-around
The TIF_NOTIFY_SIGNAL-based implementation of TWA_SIGNAL is always safe to use, regardless of context, as we won't be recursing into the signal lock. So now that all architectures are using it, we can drop this deadlock work-around, as it's always safe to use TWA_SIGNAL.

Signed-off-by: Jens Axboe <[email protected]>
1 parent: e296dc4 · commit: 355fb9e

File tree

1 file changed

+6
-15
lines changed

1 file changed

+6
-15
lines changed

fs/io_uring.c

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1995,7 +1995,7 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
19951995
return __io_req_find_next(req);
19961996
}
19971997

1998-
static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
1998+
static int io_req_task_work_add(struct io_kiocb *req)
19991999
{
20002000
struct task_struct *tsk = req->task;
20012001
struct io_ring_ctx *ctx = req->ctx;
@@ -2012,7 +2012,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
20122012
* will do the job.
20132013
*/
20142014
notify = TWA_NONE;
2015-
if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
2015+
if (!(ctx->flags & IORING_SETUP_SQPOLL))
20162016
notify = TWA_SIGNAL;
20172017

20182018
ret = task_work_add(tsk, &req->task_work, notify);
@@ -2074,7 +2074,7 @@ static void io_req_task_queue(struct io_kiocb *req)
20742074
init_task_work(&req->task_work, io_req_task_submit);
20752075
percpu_ref_get(&req->ctx->refs);
20762076

2077-
ret = io_req_task_work_add(req, true);
2077+
ret = io_req_task_work_add(req);
20782078
if (unlikely(ret)) {
20792079
struct task_struct *tsk;
20802080

@@ -2196,7 +2196,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
21962196
int ret;
21972197

21982198
init_task_work(&req->task_work, io_put_req_deferred_cb);
2199-
ret = io_req_task_work_add(req, true);
2199+
ret = io_req_task_work_add(req);
22002200
if (unlikely(ret)) {
22012201
struct task_struct *tsk;
22022202

@@ -3305,7 +3305,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
33053305

33063306
/* submit ref gets dropped, acquire a new one */
33073307
refcount_inc(&req->refs);
3308-
ret = io_req_task_work_add(req, true);
3308+
ret = io_req_task_work_add(req);
33093309
if (unlikely(ret)) {
33103310
struct task_struct *tsk;
33113311

@@ -4843,7 +4843,6 @@ struct io_poll_table {
48434843
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
48444844
__poll_t mask, task_work_func_t func)
48454845
{
4846-
bool twa_signal_ok;
48474846
int ret;
48484847

48494848
/* for instances that support it check for an event match first: */
@@ -4858,21 +4857,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
48584857
init_task_work(&req->task_work, func);
48594858
percpu_ref_get(&req->ctx->refs);
48604859

4861-
/*
4862-
* If we using the signalfd wait_queue_head for this wakeup, then
4863-
* it's not safe to use TWA_SIGNAL as we could be recursing on the
4864-
* tsk->sighand->siglock on doing the wakeup. Should not be needed
4865-
* either, as the normal wakeup will suffice.
4866-
*/
4867-
twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
4868-
48694860
/*
48704861
* If this fails, then the task is exiting. When a task exits, the
48714862
* work gets canceled, so just cancel this request as well instead
48724863
* of executing it. We can't safely execute it anyway, as we may not
48734864
* have the needed state needed for it anyway.
48744865
*/
4875-
ret = io_req_task_work_add(req, twa_signal_ok);
4866+
ret = io_req_task_work_add(req);
48764867
if (unlikely(ret)) {
48774868
struct task_struct *tsk;
48784869

0 commit comments

Comments
 (0)