@@ -303,11 +303,6 @@ struct io_timeout_data {
 	u32				seq_offset;
 };

-struct io_timeout {
-	struct file			*file;
-	struct io_timeout_data		*data;
-};
-
 struct io_async_connect {
 	struct sockaddr_storage		address;
 };
@@ -332,6 +327,7 @@ struct io_async_ctx {
 		struct io_async_rw	rw;
 		struct io_async_msghdr	msg;
 		struct io_async_connect	connect;
+		struct io_timeout_data	timeout;
 	};
 };

@@ -346,7 +342,6 @@ struct io_kiocb {
 		struct file		*file;
 		struct kiocb		rw;
 		struct io_poll_iocb	poll;
-		struct io_timeout	timeout;
 	};

 	const struct io_uring_sqe	*sqe;
@@ -619,7 +614,7 @@ static void io_kill_timeout(struct io_kiocb *req)
 {
 	int ret;

-	ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret != -1) {
 		atomic_inc(&req->ctx->cq_timeouts);
 		list_del_init(&req->list);
@@ -877,8 +872,6 @@ static void __io_free_req(struct io_kiocb *req)
 		wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
-	if (req->flags & REQ_F_TIMEOUT)
-		kfree(req->timeout.data);
 	percpu_ref_put(&ctx->refs);
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -891,7 +884,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;

-	ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret != -1) {
 		io_cqring_fill_event(req, -ECANCELED);
 		io_commit_cqring(ctx);
@@ -2618,7 +2611,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 	if (ret == -ENOENT)
 		return ret;

-	ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret == -1)
 		return -EALREADY;

@@ -2660,7 +2653,8 @@ static int io_timeout_remove(struct io_kiocb *req,
 	return 0;
 }

-static int io_timeout_setup(struct io_kiocb *req)
+static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+			   bool is_timeout_link)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_timeout_data *data;
@@ -2670,15 +2664,14 @@ static int io_timeout_setup(struct io_kiocb *req)
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
 		return -EINVAL;
+	if (sqe->off && is_timeout_link)
+		return -EINVAL;
 	flags = READ_ONCE(sqe->timeout_flags);
 	if (flags & ~IORING_TIMEOUT_ABS)
 		return -EINVAL;

-	data = kzalloc(sizeof(struct io_timeout_data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
+	data = &io->timeout;
 	data->req = req;
-	req->timeout.data = data;
 	req->flags |= REQ_F_TIMEOUT;

 	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
@@ -2690,6 +2683,7 @@ static int io_timeout_setup(struct io_kiocb *req)
 		data->mode = HRTIMER_MODE_REL;

 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+	req->io = io;
 	return 0;
 }

@@ -2698,13 +2692,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	unsigned count;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_timeout_data *data;
+	struct io_async_ctx *io;
 	struct list_head *entry;
 	unsigned span = 0;
-	int ret;

-	ret = io_timeout_setup(req);
-	if (ret)
-		return ret;
+	io = req->io;
+	if (!io) {
+		int ret;
+
+		io = kmalloc(sizeof(*io), GFP_KERNEL);
+		if (!io)
+			return -ENOMEM;
+		ret = io_timeout_prep(req, io, false);
+		if (ret) {
+			kfree(io);
+			return ret;
+		}
+	}
+	data = &req->io->timeout;

 	/*
 	 * sqe->off holds how many events that need to occur for this
@@ -2720,7 +2725,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}

 	req->sequence = ctx->cached_sq_head + count - 1;
-	req->timeout.data->seq_offset = count;
+	data->seq_offset = count;

 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
@@ -2731,7 +2736,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
 		unsigned nxt_sq_head;
 		long long tmp, tmp_nxt;
-		u32 nxt_offset = nxt->timeout.data->seq_offset;
+		u32 nxt_offset = nxt->io->timeout.seq_offset;

 		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
 			continue;
@@ -2764,7 +2769,6 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	req->sequence -= span;
 add:
 	list_add(&req->list, entry);
-	data = req->timeout.data;
 	data->timer.function = io_timeout_fn;
 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -2872,6 +2876,10 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
 	case IORING_OP_CONNECT:
 		ret = io_connect_prep(req, io);
 		break;
+	case IORING_OP_TIMEOUT:
+		return io_timeout_prep(req, io, false);
+	case IORING_OP_LINK_TIMEOUT:
+		return io_timeout_prep(req, io, true);
 	default:
 		req->io = io;
 		return 0;
@@ -2899,17 +2907,18 @@ static int io_req_defer(struct io_kiocb *req)
 	if (!io)
 		return -EAGAIN;

+	ret = io_req_defer_prep(req, io);
+	if (ret < 0) {
+		kfree(io);
+		return ret;
+	}
+
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
-		kfree(io);
 		return 0;
 	}

-	ret = io_req_defer_prep(req, io);
-	if (ret < 0)
-		return ret;
-
 	trace_io_uring_defer(ctx, req, req->user_data);
 	list_add_tail(&req->list, &ctx->defer_list);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -3198,7 +3207,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 	 */
 	spin_lock_irq(&ctx->completion_lock);
 	if (!list_empty(&req->list)) {
-		struct io_timeout_data *data = req->timeout.data;
+		struct io_timeout_data *data = &req->io->timeout;

 		data->timer.function = io_link_timeout_fn;
 		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
@@ -3345,26 +3354,18 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 		if (req->sqe->flags & IOSQE_IO_DRAIN)
 			(*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;

-		if (READ_ONCE(req->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
-			ret = io_timeout_setup(req);
-			/* common setup allows offset being set, we don't */
-			if (!ret && req->sqe->off)
-				ret = -EINVAL;
-			if (ret) {
-				prev->flags |= REQ_F_FAIL_LINK;
-				goto err_req;
-			}
-		}
-
 		io = kmalloc(sizeof(*io), GFP_KERNEL);
 		if (!io) {
 			ret = -EAGAIN;
 			goto err_req;
 		}

 		ret = io_req_defer_prep(req, io);
-		if (ret)
+		if (ret) {
+			kfree(io);
+			prev->flags |= REQ_F_FAIL_LINK;
 			goto err_req;
+		}
 		trace_io_uring_link(ctx, req, prev);
 		list_add_tail(&req->list, &prev->link_list);
 	} else if (req->sqe->flags & IOSQE_IO_LINK) {
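
A note on the pattern the hunks above establish: the timeout state now lives inside the request's single per-request async context (io_async_ctx) instead of a separately kzalloc'd struct, the prep helper attaches it via req->io, and the caller frees the context only when preparation fails before it was attached. The sketch below is a minimal userspace model of that ownership flow, not kernel code; the types and helper names (timeout_data, async_ctx, request, timeout_prep, queue_timeout) are hypothetical stand-ins.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures touched by the patch. */
struct timeout_data { long ts_ns; unsigned seq_offset; };
struct async_ctx    { struct timeout_data timeout; }; /* replaces the old separate allocation */
struct request      { struct async_ctx *io; };

/* Models io_timeout_prep(): fill the embedded timeout state and attach
 * the async context to the request; no extra allocation is needed. */
static int timeout_prep(struct request *req, struct async_ctx *io)
{
	io->timeout.ts_ns = 1000000;	/* would come from the SQE */
	io->timeout.seq_offset = 0;
	req->io = io;			/* request now owns the context */
	return 0;
}

/* Models the io_timeout() path: reuse an already-attached context
 * (e.g. one set up earlier by the defer/link path), otherwise allocate
 * one and free it again if preparation fails. */
static int queue_timeout(struct request *req)
{
	struct async_ctx *io = req->io;

	if (!io) {
		io = malloc(sizeof(*io));
		if (!io)
			return -1;		/* -ENOMEM in the kernel */
		if (timeout_prep(req, io)) {
			free(io);		/* never attached, caller cleans up */
			return -1;
		}
	}
	/* req->io->timeout is now valid for arming a timer. */
	printf("timeout armed, offset=%u\n", req->io->timeout.seq_offset);
	return 0;
}

int main(void)
{
	struct request req = { .io = NULL };

	if (queue_timeout(&req) == 0)
		free(req.io);
	return 0;
}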