 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
-#include <linux/freezer.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -1095,8 +1094,6 @@ static bool io_match_task(struct io_kiocb *head,
 	io_for_each_link(req, head) {
 		if (req->flags & REQ_F_INFLIGHT)
 			return true;
-		if (req->task->files == files)
-			return true;
 	}
 	return false;
 }
@@ -1239,16 +1236,16 @@ static void io_queue_async_work(struct io_kiocb *req)
 	BUG_ON(!tctx);
 	BUG_ON(!tctx->io_wq);
 
-	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
-					&req->work, req->flags);
 	/* init ->work of the whole link before punting */
 	io_prep_async_link(req);
+	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+					&req->work, req->flags);
 	io_wq_enqueue(tctx->io_wq, &req->work);
 	if (link)
 		io_queue_linked_timeout(link);
 }
 
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
 {
 	struct io_timeout_data *io = req->async_data;
 	int ret;
@@ -1258,31 +1255,11 @@ static void io_kill_timeout(struct io_kiocb *req)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req, 0);
+		io_cqring_fill_event(req, status);
 		io_put_req_deferred(req, 1);
 	}
 }
 
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-			     struct files_struct *files)
-{
-	struct io_kiocb *req, *tmp;
-	int canceled = 0;
-
-	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-		if (io_match_task(req, tsk, files)) {
-			io_kill_timeout(req);
-			canceled++;
-		}
-	}
-	spin_unlock_irq(&ctx->completion_lock);
-	return canceled != 0;
-}
-
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
 	do {
@@ -1327,7 +1304,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 			break;
 
 		list_del_init(&req->timeout.list);
-		io_kill_timeout(req);
+		io_kill_timeout(req, 0);
 	} while (!list_empty(&ctx->timeout_list));
 
 	ctx->cq_last_tm_flush = seq;
@@ -2524,13 +2501,12 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
 	int cflags = 0;
 
+	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+		kiocb_end_write(req);
 	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
 		return;
 	if (res != req->result)
 		req_set_fail_links(req);
-
-	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
-		kiocb_end_write(req);
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_rw_kbuf(req);
 	__io_req_complete(req, issue_flags, res, cflags);
@@ -3978,6 +3954,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 static int io_provide_buffers_prep(struct io_kiocb *req,
 				   const struct io_uring_sqe *sqe)
 {
+	unsigned long size;
 	struct io_provide_buf *p = &req->pbuf;
 	u64 tmp;
 
@@ -3991,7 +3968,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
 	p->addr = READ_ONCE(sqe->addr);
 	p->len = READ_ONCE(sqe->len);
 
-	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
+	size = (unsigned long)p->len * p->nbufs;
+	if (!access_ok(u64_to_user_ptr(p->addr), size))
 		return -EFAULT;
 
 	p->bgid = READ_ONCE(sqe->buf_group);
@@ -4820,7 +4798,6 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 			ret = -ENOMEM;
 			goto out;
 		}
-		io = req->async_data;
 		memcpy(req->async_data, &__io, sizeof(__io));
 		return -EAGAIN;
 	}
@@ -5583,7 +5560,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	data->mode = io_translate_timeout_mode(flags);
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-	io_req_track_inflight(req);
+	if (is_timeout_link)
+		io_req_track_inflight(req);
 	return 0;
 }
 
@@ -6479,15 +6457,15 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	ret = io_init_req(ctx, req, sqe);
 	if (unlikely(ret)) {
 fail_req:
-		io_put_req(req);
-		io_req_complete(req, ret);
 		if (link->head) {
 			/* fail even hard links since we don't submit */
 			link->head->flags |= REQ_F_FAIL_LINK;
 			io_put_req(link->head);
 			io_req_complete(link->head, -ECANCELED);
 			link->head = NULL;
 		}
+		io_put_req(req);
+		io_req_complete(req, ret);
 		return ret;
 	}
 	ret = io_req_prep(req, sqe);
@@ -6764,8 +6742,13 @@ static int io_sq_thread(void *data)
 			timeout = jiffies + sqd->sq_thread_idle;
 			continue;
 		}
-		if (fatal_signal_pending(current))
+		if (signal_pending(current)) {
+			struct ksignal ksig;
+
+			if (!get_signal(&ksig))
+				continue;
 			break;
+		}
 		sqt_spin = false;
 		cap_entries = !list_is_singular(&sqd->ctx_list);
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -6808,7 +6791,6 @@ static int io_sq_thread(void *data)
 
 		mutex_unlock(&sqd->lock);
 		schedule();
-		try_to_freeze();
 		mutex_lock(&sqd->lock);
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
 			io_ring_clear_wakeup_flag(ctx);
@@ -6873,7 +6855,7 @@ static int io_run_task_work_sig(void)
 		return 1;
 	if (!signal_pending(current))
 		return 0;
-	if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
+	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
 		return -ERESTARTSYS;
 	return -EINTR;
 }
@@ -8563,6 +8545,14 @@ static void io_ring_exit_work(struct work_struct *work)
 	struct io_tctx_node *node;
 	int ret;
 
+	/* prevent SQPOLL from submitting new requests */
+	if (ctx->sq_data) {
+		io_sq_thread_park(ctx->sq_data);
+		list_del_init(&ctx->sqd_list);
+		io_sqd_update_thread_idle(ctx->sq_data);
+		io_sq_thread_unpark(ctx->sq_data);
+	}
+
 	/*
 	 * If we're doing polled IO and end up having requests being
 	 * submitted async (out-of-line), then completions can come in while
@@ -8599,6 +8589,28 @@ static void io_ring_exit_work(struct work_struct *work)
 	io_ring_ctx_free(ctx);
 }
 
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+			     struct files_struct *files)
+{
+	struct io_kiocb *req, *tmp;
+	int canceled = 0;
+
+	spin_lock_irq(&ctx->completion_lock);
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+		if (io_match_task(req, tsk, files)) {
+			io_kill_timeout(req, -ECANCELED);
+			canceled++;
+		}
+	}
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+
+	if (canceled != 0)
+		io_cqring_ev_posted(ctx);
+	return canceled != 0;
+}
+
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	unsigned long index;
@@ -8614,14 +8626,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_unregister_personality(ctx, index);
 	mutex_unlock(&ctx->uring_lock);
 
-	/* prevent SQPOLL from submitting new requests */
-	if (ctx->sq_data) {
-		io_sq_thread_park(ctx->sq_data);
-		list_del_init(&ctx->sqd_list);
-		io_sqd_update_thread_idle(ctx->sq_data);
-		io_sq_thread_unpark(ctx->sq_data);
-	}
-
 	io_kill_timeouts(ctx, NULL, NULL);
 	io_poll_remove_all(ctx, NULL, NULL);
 