Skip to content

Commit af0a2ff

Browse files
isilence authored and axboe committed
io_uring: avoid normal tw intermediate fallback
When a DEFER_TASKRUN io_uring is terminating it requeues deferred task work items as normal tw, which can further fallback to kthread execution. Avoid this extra step and always push them to the fallback kthread. Signed-off-by: Pavel Begunkov <[email protected]> Link: https://lore.kernel.org/r/d1cd472cec2230c66bd1c8d412a5833f0af75384.1730772720.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <[email protected]>
1 parent 6bf90bd commit af0a2ff

File tree

2 files changed

+11
-12
lines changed

2 files changed

+11
-12
lines changed

io_uring/io_uring.c

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1066,9 +1066,8 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
10661066
return node;
10671067
}
10681068

1069-
static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
1069+
static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
10701070
{
1071-
struct llist_node *node = llist_del_all(&tctx->task_list);
10721071
struct io_ring_ctx *last_ctx = NULL;
10731072
struct io_kiocb *req;
10741073

@@ -1094,6 +1093,13 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
10941093
}
10951094
}
10961095

1096+
static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
1097+
{
1098+
struct llist_node *node = llist_del_all(&tctx->task_list);
1099+
1100+
__io_fallback_tw(node, sync);
1101+
}
1102+
10971103
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
10981104
unsigned int max_entries,
10991105
unsigned int *count)
@@ -1247,16 +1253,9 @@ void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
12471253

12481254
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
12491255
{
1250-
struct llist_node *node;
1256+
struct llist_node *node = llist_del_all(&ctx->work_llist);
12511257

1252-
node = llist_del_all(&ctx->work_llist);
1253-
while (node) {
1254-
struct io_kiocb *req = container_of(node, struct io_kiocb,
1255-
io_task_work.node);
1256-
1257-
node = node->next;
1258-
io_req_normal_work_add(req);
1259-
}
1258+
__io_fallback_tw(node, false);
12601259
}
12611260

12621261
static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,

io_uring/io_uring.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
136136
* Not from an SQE, as those cannot be submitted, but via
137137
* updating tagged resources.
138138
*/
139-
if (ctx->submitter_task->flags & PF_EXITING)
139+
if (percpu_ref_is_dying(&ctx->refs))
140140
lockdep_assert(current_work());
141141
else
142142
lockdep_assert(current == ctx->submitter_task);

0 commit comments

Comments (0)