@@ -1066,9 +1066,8 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
 	return node;
 }
 
-static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 {
-	struct llist_node *node = llist_del_all(&tctx->task_list);
 	struct io_ring_ctx *last_ctx = NULL;
 	struct io_kiocb *req;
 
@@ -1094,6 +1093,13 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	}
 }
 
+static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+
+	__io_fallback_tw(node, sync);
+}
+
 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
 				      unsigned int max_entries,
 				      unsigned int *count)
@@ -1247,16 +1253,9 @@ void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 {
-	struct llist_node *node;
+	struct llist_node *node = llist_del_all(&ctx->work_llist);
 
-	node = llist_del_all(&ctx->work_llist);
-	while (node) {
-		struct io_kiocb *req = container_of(node, struct io_kiocb,
-						    io_task_work.node);
-
-		node = node->next;
-		io_req_normal_work_add(req);
-	}
+	__io_fallback_tw(node, false);
 }
 
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,