@@ -1025,6 +1025,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -1048,8 +1049,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
-			  REQ_F_INFLIGHT))
+	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
 		__io_clean_op(req);
 }
 
@@ -1075,8 +1075,11 @@ static bool io_match_task(struct io_kiocb *head,
 		return true;
 
 	io_for_each_link(req, head) {
-		if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-		    (req->work.flags & IO_WQ_WORK_FILES) &&
+		if (!(req->flags & REQ_F_WORK_INITIALIZED))
+			continue;
+		if (req->file && req->file->f_op == &io_uring_fops)
+			return true;
+		if ((req->work.flags & IO_WQ_WORK_FILES) &&
 		    req->work.identity->files == files)
 			return true;
 	}
@@ -1394,6 +1397,8 @@ static void io_req_clean_work(struct io_kiocb *req)
 		free_fs_struct(fs);
 		req->work.flags &= ~IO_WQ_WORK_FS;
 	}
+	if (req->flags & REQ_F_INFLIGHT)
+		io_req_drop_files(req);
 
 	io_put_identity(req->task->io_uring, req);
 }
@@ -1503,11 +1508,14 @@ static bool io_grab_identity(struct io_kiocb *req)
 			return false;
 		atomic_inc(&id->files->count);
 		get_nsproxy(id->nsproxy);
-		req->flags |= REQ_F_INFLIGHT;
 
-		spin_lock_irq(&ctx->inflight_lock);
-		list_add(&req->inflight_entry, &ctx->inflight_list);
-		spin_unlock_irq(&ctx->inflight_lock);
+		if (!(req->flags & REQ_F_INFLIGHT)) {
+			req->flags |= REQ_F_INFLIGHT;
+
+			spin_lock_irq(&ctx->inflight_lock);
+			list_add(&req->inflight_entry, &ctx->inflight_list);
+			spin_unlock_irq(&ctx->inflight_lock);
+		}
 		req->work.flags |= IO_WQ_WORK_FILES;
 	}
 	if (!(req->work.flags & IO_WQ_WORK_MM) &&
@@ -2270,6 +2278,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 		struct io_uring_task *tctx = rb->task->io_uring;
 
 		percpu_counter_sub(&tctx->inflight, rb->task_refs);
+		if (atomic_read(&tctx->in_idle))
+			wake_up(&tctx->wait);
 		put_task_struct_many(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
@@ -2288,6 +2298,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 			struct io_uring_task *tctx = rb->task->io_uring;
 
 			percpu_counter_sub(&tctx->inflight, rb->task_refs);
+			if (atomic_read(&tctx->in_idle))
+				wake_up(&tctx->wait);
 			put_task_struct_many(rb->task, rb->task_refs);
 		}
 		rb->task = req->task;
@@ -3548,7 +3560,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 
 	/* read it all, or we did blocking attempt. no retry. */
 	if (!iov_iter_count(iter) || !force_nonblock ||
-	    (req->file->f_flags & O_NONBLOCK))
+	    (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
 		goto done;
 
 	io_size -= ret;
@@ -4468,7 +4480,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	 * io_wq_work.flags, so initialize io_wq_work firstly.
 	 */
 	io_req_init_async(req);
-	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -4501,6 +4512,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
 
 	/* if the file has a flush method, be safe and punt to async */
 	if (close->put_file->f_op->flush && force_nonblock) {
+		/* not safe to cancel at this point */
+		req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 		/* was never set, but play safe */
 		req->flags &= ~REQ_F_NOWAIT;
 		/* avoid grabbing files - we don't need the files */
@@ -6157,8 +6170,10 @@ static void io_req_drop_files(struct io_kiocb *req)
 	struct io_uring_task *tctx = req->task->io_uring;
 	unsigned long flags;
 
-	put_files_struct(req->work.identity->files);
-	put_nsproxy(req->work.identity->nsproxy);
+	if (req->work.flags & IO_WQ_WORK_FILES) {
+		put_files_struct(req->work.identity->files);
+		put_nsproxy(req->work.identity->nsproxy);
+	}
 	spin_lock_irqsave(&ctx->inflight_lock, flags);
 	list_del(&req->inflight_entry);
 	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
@@ -6225,9 +6240,6 @@ static void __io_clean_op(struct io_kiocb *req)
 		}
 		req->flags &= ~REQ_F_NEED_CLEANUP;
 	}
-
-	if (req->flags & REQ_F_INFLIGHT)
-		io_req_drop_files(req);
 }
 
 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6446,6 +6458,15 @@ static struct file *io_file_get(struct io_submit_state *state,
 		file = __io_file_get(state, fd);
 	}
 
+	if (file && file->f_op == &io_uring_fops) {
+		io_req_init_async(req);
+		req->flags |= REQ_F_INFLIGHT;
+
+		spin_lock_irq(&ctx->inflight_lock);
+		list_add(&req->inflight_entry, &ctx->inflight_list);
+		spin_unlock_irq(&ctx->inflight_lock);
+	}
+
 	return file;
 }
 
@@ -8856,8 +8877,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 
 		spin_lock_irq(&ctx->inflight_lock);
 		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-			if (req->task != task ||
-			    req->work.identity->files != files)
+			if (!io_match_task(req, task, files))
 				continue;
 			found = true;
 			break;
@@ -8874,6 +8894,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
 		io_poll_remove_all(ctx, task, files);
 		io_kill_timeouts(ctx, task, files);
+		io_cqring_overflow_flush(ctx, true, task, files);
 		/* cancellations _may_ trigger task work */
 		io_run_task_work();
 		schedule();
@@ -8914,8 +8935,6 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 {
-	WARN_ON_ONCE(ctx->sqo_task != current);
-
 	mutex_lock(&ctx->uring_lock);
 	ctx->sqo_dead = 1;
 	mutex_unlock(&ctx->uring_lock);
@@ -8937,6 +8956,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
 		/* for SQPOLL only sqo_task has task notes */
+		WARN_ON_ONCE(ctx->sqo_task != current);
 		io_disable_sqo_submit(ctx);
 		task = ctx->sq_data->thread;
 		atomic_inc(&task->io_uring->in_idle);
@@ -9082,6 +9102,10 @@ void __io_uring_task_cancel(void)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
 
+	/* trigger io_disable_sqo_submit() */
+	if (tctx->sqpoll)
+		__io_uring_files_cancel(NULL);
+
 	do {
 		/* read completions before cancelations */
 		inflight = tctx_inflight(tctx);
@@ -9128,7 +9152,10 @@ static int io_uring_flush(struct file *file, void *data)
 
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		/* there is only one file note, which is owned by sqo_task */
-		WARN_ON_ONCE((ctx->sqo_task == current) ==
+		WARN_ON_ONCE(ctx->sqo_task != current &&
+			     xa_load(&tctx->xa, (unsigned long)file));
+		/* sqo_dead check is for when this happens after cancellation */
+		WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
 			     !xa_load(&tctx->xa, (unsigned long)file));
 
 		io_disable_sqo_submit(ctx);
0 commit comments