@@ -719,7 +719,7 @@ static void io_put_task_remote(struct task_struct *task, int nr)
 	struct io_uring_task *tctx = task->io_uring;
 
 	percpu_counter_sub(&tctx->inflight, nr);
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		wake_up(&tctx->wait);
 	put_task_struct_many(task, nr);
 }
@@ -1258,8 +1258,8 @@ void tctx_task_work(struct callback_head *cb)
 
 	ctx_flush_and_put(ctx, &uring_locked);
 
-	/* relaxed read is enough as only the task itself sets ->in_idle */
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	/* relaxed read is enough as only the task itself sets ->in_cancel */
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		io_uring_drop_tctx_refs(current);
 
 	trace_io_uring_task_work_run(tctx, count, loops);
@@ -1291,7 +1291,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	/* needed for the following wake up */
 	smp_mb__after_atomic();
 
-	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+	if (unlikely(atomic_read(&req->task->io_uring->in_cancel))) {
 		io_move_task_work_from_local(ctx);
 		goto put_ref;
 	}
@@ -2937,12 +2937,12 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
 
 	work = container_of(cb, struct io_tctx_exit, task_work);
 	/*
-	 * When @in_idle, we're in cancellation and it's racy to remove the
+	 * When @in_cancel, we're in cancellation and it's racy to remove the
 	 * node. It'll be removed by the end of cancellation, just ignore it.
 	 * tctx can be NULL if the queueing of this task_work raced with
 	 * work cancelation off the exec path.
 	 */
-	if (tctx && !atomic_read(&tctx->in_idle))
+	if (tctx && !atomic_read(&tctx->in_cancel))
 		io_uring_del_tctx_node((unsigned long)work->ctx);
 	complete(&work->completion);
 }
@@ -3210,7 +3210,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (tctx->io_wq)
 		io_wq_exit_start(tctx->io_wq);
 
-	atomic_inc(&tctx->in_idle);
+	atomic_inc(&tctx->in_cancel);
 	do {
 		bool loop = false;
 
@@ -3261,9 +3261,9 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (cancel_all) {
 		/*
 		 * We shouldn't run task_works after cancel, so just leave
-		 * ->in_idle set for normal exit.
+		 * ->in_cancel set for normal exit.
 		 */
-		atomic_dec(&tctx->in_idle);
+		atomic_dec(&tctx->in_cancel);
 		/* for exec all current's requests should be gone, kill tctx */
 		__io_uring_free(current);
 	}
0 commit comments