@@ -1260,6 +1260,9 @@ static void __io_req_aux_free(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (req->flags & REQ_F_NEED_CLEANUP)
+		io_cleanup_req(req);
+
 	kfree(req->io);
 	if (req->file) {
 		if (req->flags & REQ_F_FIXED_FILE)
@@ -1275,9 +1278,6 @@ static void __io_free_req(struct io_kiocb *req)
 {
 	__io_req_aux_free(req);
 
-	if (req->flags & REQ_F_NEED_CLEANUP)
-		io_cleanup_req(req);
-
 	if (req->flags & REQ_F_INFLIGHT) {
 		struct io_ring_ctx *ctx = req->ctx;
 		unsigned long flags;
@@ -1672,11 +1672,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-			     long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+			   long min)
 {
 	int iters = 0, ret = 0;
 
+	/*
+	 * We disallow the app entering submit/complete with polling, but we
+	 * still need to lock the ring to prevent racing with polled issue
+	 * that got punted to a workqueue.
+	 */
+	mutex_lock(&ctx->uring_lock);
 	do {
 		int tmin = 0;
 
@@ -1712,21 +1718,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		ret = 0;
 	} while (min && !*nr_events && !need_resched());
 
-	return ret;
-}
-
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-			long min)
-{
-	int ret;
-
-	/*
-	 * We disallow the app entering submit/complete with polling, but we
-	 * still need to lock the ring to prevent racing with polled issue
-	 * that got punted to a workqueue.
-	 */
-	mutex_lock(&ctx->uring_lock);
-	ret = __io_iopoll_check(ctx, nr_events, min);
 	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
@@ -2517,6 +2508,9 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
 	struct io_kiocb *nxt = NULL;
 	int ret;
 
+	if (io_req_cancelled(req))
+		return;
+
 	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
 				req->sync.len);
 	if (ret < 0)
@@ -2904,6 +2898,7 @@ static void io_close_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
+	/* not cancellable, don't do io_req_cancelled() */
 	__io_close_finish(req, &nxt);
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
@@ -3071,7 +3066,7 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 		if (req->io)
 			return -EAGAIN;
 		if (io_alloc_async_ctx(req)) {
-			if (kmsg && kmsg->iov != kmsg->fast_iov)
+			if (kmsg->iov != kmsg->fast_iov)
 				kfree(kmsg->iov);
 			return -ENOMEM;
 		}
@@ -3225,7 +3220,7 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 		if (req->io)
 			return -EAGAIN;
 		if (io_alloc_async_ctx(req)) {
-			if (kmsg && kmsg->iov != kmsg->fast_iov)
+			if (kmsg->iov != kmsg->fast_iov)
 				kfree(kmsg->iov);
 			return -ENOMEM;
 		}
@@ -5114,7 +5109,7 @@ static int io_sq_thread(void *data)
 				 */
 				mutex_lock(&ctx->uring_lock);
 				if (!list_empty(&ctx->poll_list))
-					__io_iopoll_check(ctx, &nr_events, 0);
+					io_iopoll_getevents(ctx, &nr_events, 0);
 				else
 					inflight = 0;
 				mutex_unlock(&ctx->uring_lock);
@@ -5138,6 +5133,18 @@ static int io_sq_thread(void *data)
 		 * to enter the kernel to reap and flush events.
 		 */
 		if (!to_submit || ret == -EBUSY) {
+			/*
+			 * Drop cur_mm before scheduling, we can't hold it for
+			 * long periods (or over schedule()). Do this before
+			 * adding ourselves to the waitqueue, as the unuse/drop
+			 * may sleep.
+			 */
+			if (cur_mm) {
+				unuse_mm(cur_mm);
+				mmput(cur_mm);
+				cur_mm = NULL;
+			}
+
 			/*
 			 * We're polling. If we're within the defined idle
 			 * period, then let us spin without work before going
@@ -5152,18 +5159,6 @@ static int io_sq_thread(void *data)
 				continue;
 			}
 
-			/*
-			 * Drop cur_mm before scheduling, we can't hold it for
-			 * long periods (or over schedule()). Do this before
-			 * adding ourselves to the waitqueue, as the unuse/drop
-			 * may sleep.
-			 */
-			if (cur_mm) {
-				unuse_mm(cur_mm);
-				mmput(cur_mm);
-				cur_mm = NULL;
-			}
-
 			prepare_to_wait(&ctx->sqo_wait, &wait,
 						TASK_INTERRUPTIBLE);
0 commit comments