@@ -2037,6 +2037,28 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	return false;
 }
 
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+	struct io_wq_work *work = *workptr;
+	struct io_kiocb *link = work->data;
+
+	io_queue_linked_timeout(link);
+	work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+	struct io_kiocb *link;
+
+	io_prep_async_work(nxt, &link);
+	*workptr = &nxt->work;
+	if (link) {
+		nxt->work.flags |= IO_WQ_WORK_CB;
+		nxt->work.func = io_link_work_cb;
+		nxt->work.data = link;
+	}
+}
+
 static void io_fsync_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
@@ -2055,7 +2077,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2111,7 +2133,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2377,7 +2399,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
 		return;
 	__io_accept(req, &nxt, false);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 #endif
 
@@ -2608,7 +2630,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 		req_set_fail_links(req);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -3271,15 +3293,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-	struct io_wq_work *work = *workptr;
-	struct io_kiocb *link = work->data;
-
-	io_queue_linked_timeout(link);
-	work->func = io_wq_submit_work;
-}
-
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
@@ -3316,17 +3329,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 	}
 
 	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt) {
-		struct io_kiocb *link;
-
-		io_prep_async_work(nxt, &link);
-		*workptr = &nxt->work;
-		if (link) {
-			nxt->work.flags |= IO_WQ_WORK_CB;
-			nxt->work.func = io_link_work_cb;
-			nxt->work.data = link;
-		}
-	}
+	if (!ret && nxt)
+		io_wq_assign_next(workptr, nxt);
 }
 
 static bool io_req_op_valid(int op)
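For context on the pattern the diff factors out: io_wq_assign_next() points *workptr at the next request's work item and, when that request carries a linked timeout, sets IO_WQ_WORK_CB so the worker first runs io_link_work_cb(), which arms the timeout and then rewrites work->func back to io_wq_submit_work for the actual submission. The snippet below is only a minimal userspace model of that func-swap, with stand-in types, stand-in flag values, and an explicit driver in place of the real io-wq worker loop; it is not kernel code.

#include <stdio.h>

/* stand-in for the kernel's struct io_wq_work; only the fields the
 * callback pattern touches are modeled */
struct io_wq_work {
	void (*func)(struct io_wq_work **workptr);
	void *data;
	unsigned int flags;
};

#define IO_WQ_WORK_CB	(1u << 0)	/* stand-in flag value */

/* stand-in for the normal submission path */
static void io_wq_submit_work(struct io_wq_work **workptr)
{
	printf("submit work %p\n", (void *)*workptr);
}

/* stand-in for arming the linked timeout request */
static void io_queue_linked_timeout(void *link)
{
	printf("arm linked timeout %p\n", link);
}

/* mirrors io_link_work_cb() above: arm the timeout, then restore the
 * normal handler so the next invocation submits the request */
static void io_link_work_cb(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	io_queue_linked_timeout(work->data);
	work->func = io_wq_submit_work;
}

int main(void)
{
	int linked_timeout;	/* placeholder for a linked request */
	struct io_wq_work work = {
		.func	= io_link_work_cb,
		.data	= &linked_timeout,
		.flags	= IO_WQ_WORK_CB,
	};
	struct io_wq_work *workptr = &work;

	/* a worker would see IO_WQ_WORK_CB and call func; the callback
	 * swaps func so the second call runs io_wq_submit_work */
	if (work.flags & IO_WQ_WORK_CB)
		workptr->func(&workptr);
	workptr->func(&workptr);
	return 0;
}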