@@ -142,7 +142,7 @@ struct io_defer_entry {
 #define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
 
 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-					 struct task_struct *task,
+					 struct io_uring_task *tctx,
 					 bool cancel_all);
 
 static void io_queue_sqe(struct io_kiocb *req);
@@ -201,12 +201,12 @@ static bool io_match_linked(struct io_kiocb *head)
  * As io_match_task() but protected against racing with linked timeouts.
  * User must not hold timeout_lock.
  */
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
			bool cancel_all)
 {
	bool matched;
 
-	if (task && head->task != task)
+	if (tctx && head->task->io_uring != tctx)
		return false;
	if (cancel_all)
		return true;
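
For illustration, a minimal self-contained sketch of the matching rule this hunk introduces, using simplified stand-in types (the real struct io_kiocb and struct io_uring_task carry far more state, and the cancel_all/linked-timeout handling is elided):

/* Stand-in types for illustration only; not the kernel definitions. */
struct io_uring_task { int dummy; };
struct task_struct   { struct io_uring_task *io_uring; };
struct io_kiocb      { struct task_struct *task; };

/*
 * After the patch, requests are matched by io_uring task context:
 * a NULL tctx matches every request, otherwise a request matches
 * only when its submitting task's tctx is the one being cancelled.
 */
static bool matches_tctx(struct io_kiocb *head, struct io_uring_task *tctx)
{
	return !tctx || head->task->io_uring == tctx;
}
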
@@ -2987,7 +2987,7 @@ static int io_uring_release(struct inode *inode, struct file *file)
 }
 
 struct io_task_cancel {
-	struct task_struct *task;
+	struct io_uring_task *tctx;
	bool all;
 };
 
@@ -2996,19 +2996,19 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_task_cancel *cancel = data;
 
-	return io_match_task_safe(req, cancel->task, cancel->all);
+	return io_match_task_safe(req, cancel->tctx, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
-					 struct task_struct *task,
+					 struct io_uring_task *tctx,
					 bool cancel_all)
 {
	struct io_defer_entry *de;
	LIST_HEAD(list);
 
	spin_lock(&ctx->completion_lock);
	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_match_task_safe(de->req, task, cancel_all)) {
+		if (io_match_task_safe(de->req, tctx, cancel_all)) {
			list_cut_position(&list, &ctx->defer_list, &de->list);
			break;
		}
@@ -3051,11 +3051,10 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 }
 
 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-						struct task_struct *task,
+						struct io_uring_task *tctx,
						bool cancel_all)
 {
-	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
-	struct io_uring_task *tctx = task ? task->io_uring : NULL;
+	struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
	enum io_wq_cancel cret;
	bool ret = false;
 
@@ -3069,9 +3068,9 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
	if (!ctx->rings)
		return false;
 
-	if (!task) {
+	if (!tctx) {
		ret |= io_uring_try_cancel_iowq(ctx);
-	} else if (tctx && tctx->io_wq) {
+	} else if (tctx->io_wq) {
		/*
		 * Cancels requests of all rings, not only @ctx, but
		 * it's fine as the task is in exit/exec.
@@ -3094,15 +3093,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    io_allowed_defer_tw_run(ctx))
		ret |= io_run_local_work(ctx, INT_MAX) > 0;
-	ret |= io_cancel_defer_files(ctx, task, cancel_all);
+	ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
	mutex_lock(&ctx->uring_lock);
-	ret |= io_poll_remove_all(ctx, task, cancel_all);
-	ret |= io_waitid_remove_all(ctx, task, cancel_all);
-	ret |= io_futex_remove_all(ctx, task, cancel_all);
-	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
+	ret |= io_poll_remove_all(ctx, tctx, cancel_all);
+	ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
+	ret |= io_futex_remove_all(ctx, tctx, cancel_all);
+	ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
	mutex_unlock(&ctx->uring_lock);
-	ret |= io_kill_timeouts(ctx, task, cancel_all);
-	if (task)
+	ret |= io_kill_timeouts(ctx, tctx, cancel_all);
+	if (tctx)
		ret |= io_run_task_work() > 0;
	else
		ret |= flush_delayed_work(&ctx->fallback_work);
@@ -3155,12 +3154,13 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
			if (node->ctx->sq_data)
				continue;
			loop |= io_uring_try_cancel_requests(node->ctx,
-							     current, cancel_all);
+							     current->io_uring,
+							     cancel_all);
		}
	} else {
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
			loop |= io_uring_try_cancel_requests(ctx,
-							     current,
+							     current->io_uring,
							     cancel_all);
	}
 
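
Taken together, the callers now resolve the io_uring_task once (current->io_uring) and pass it down, and io_uring_try_cancel_requests no longer needs the task ? task->io_uring : NULL dance. A rough sketch of the resulting branch structure, again with stand-in types (illustration only, not the kernel code):

/* Stand-in types for illustration only. */
struct io_wq { int dummy; };
struct io_uring_task { struct io_wq *io_wq; };

/*
 * A NULL tctx selects the "cancel everything in the ring's io-wq"
 * path; otherwise tctx is known non-NULL at this point, so the old
 * "tctx && tctx->io_wq" guard reduces to "tctx->io_wq".
 */
static void pick_cancel_path(struct io_uring_task *tctx)
{
	if (!tctx) {
		/* io_uring_try_cancel_iowq(ctx) */
	} else if (tctx->io_wq) {
		/* io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, ...) */
	}
}
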