Skip to content

Commit a13030f

Browse files
minhbq-99 authored and axboe committed
io_uring: simplify the SQPOLL thread check when cancelling requests
In io_uring_try_cancel_requests, we check whether sq_data->thread == current to determine if the function is called by the SQPOLL thread to do iopoll when IORING_SETUP_SQPOLL is set. This check can race with the SQPOLL thread termination.

io_uring_try_cancel_requests is used in 2 places: io_uring_cancel_generic and io_ring_exit_work. In io_uring_cancel_generic, we have the information whether the current is SQPOLL thread already. And the SQPOLL thread never reaches io_ring_exit_work.

So to avoid the racy check, this commit adds a boolean flag to io_uring_try_cancel_requests to determine if the caller is the SQPOLL thread.

Reported-by: [email protected]
Reported-by: Li Zetao <[email protected]>
Reviewed-by: Li Zetao <[email protected]>
Signed-off-by: Bui Quang Minh <[email protected]>
Reviewed-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 94d5744 commit a13030f

File tree

1 file changed

+11
-6
lines changed

1 file changed

+11
-6
lines changed

io_uring/io_uring.c

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,8 @@ struct io_defer_entry {
143143

144144
static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
145145
struct io_uring_task *tctx,
146-
bool cancel_all);
146+
bool cancel_all,
147+
bool is_sqpoll_thread);
147148

148149
static void io_queue_sqe(struct io_kiocb *req);
149150

@@ -2869,7 +2870,8 @@ static __cold void io_ring_exit_work(struct work_struct *work)
28692870
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
28702871
io_move_task_work_from_local(ctx);
28712872

2872-
while (io_uring_try_cancel_requests(ctx, NULL, true))
2873+
/* The SQPOLL thread never reaches this path */
2874+
while (io_uring_try_cancel_requests(ctx, NULL, true, false))
28732875
cond_resched();
28742876

28752877
if (ctx->sq_data) {
@@ -3037,7 +3039,8 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
30373039

30383040
static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
30393041
struct io_uring_task *tctx,
3040-
bool cancel_all)
3042+
bool cancel_all,
3043+
bool is_sqpoll_thread)
30413044
{
30423045
struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
30433046
enum io_wq_cancel cret;
@@ -3067,7 +3070,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
30673070

30683071
/* SQPOLL thread does its own polling */
30693072
if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3070-
(ctx->sq_data && ctx->sq_data->thread == current)) {
3073+
is_sqpoll_thread) {
30713074
while (!wq_list_empty(&ctx->iopoll_list)) {
30723075
io_iopoll_try_reap_events(ctx);
30733076
ret = true;
@@ -3140,13 +3143,15 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
31403143
continue;
31413144
loop |= io_uring_try_cancel_requests(node->ctx,
31423145
current->io_uring,
3143-
cancel_all);
3146+
cancel_all,
3147+
false);
31443148
}
31453149
} else {
31463150
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
31473151
loop |= io_uring_try_cancel_requests(ctx,
31483152
current->io_uring,
3149-
cancel_all);
3153+
cancel_all,
3154+
true);
31503155
}
31513156

31523157
if (loop) {

0 commit comments

Comments (0)