@@ -453,6 +453,10 @@ struct io_timeout {
 struct io_timeout_rem {
 	struct file			*file;
 	u64				addr;
+
+	/* timeout update */
+	struct timespec64		ts;
+	u32				flags;
 };
 
 struct io_rw {
@@ -867,7 +871,10 @@ static const struct io_op_def io_op_defs[] = {
 		.async_size		= sizeof(struct io_timeout_data),
 		.work_flags		= IO_WQ_WORK_MM,
 	},
-	[IORING_OP_TIMEOUT_REMOVE] = {},
+	[IORING_OP_TIMEOUT_REMOVE] = {
+		/* used by timeout updates' prep() */
+		.work_flags		= IO_WQ_WORK_MM,
+	},
 	[IORING_OP_ACCEPT] = {
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
@@ -5671,17 +5678,48 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 	return 0;
 }
 
+static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
+			     struct timespec64 *ts, enum hrtimer_mode mode)
+{
+	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
+	struct io_timeout_data *data;
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	req->timeout.off = 0; /* noseq */
+	data = req->async_data;
+	list_add_tail(&req->timeout.list, &ctx->timeout_list);
+	hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
+	data->timer.function = io_timeout_fn;
+	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
+	return 0;
+}
+
 static int io_timeout_remove_prep(struct io_kiocb *req,
 				  const struct io_uring_sqe *sqe)
 {
+	struct io_timeout_rem *tr = &req->timeout_rem;
+
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
+	if (sqe->ioprio || sqe->buf_index || sqe->len)
+		return -EINVAL;
+
+	tr->addr = READ_ONCE(sqe->addr);
+	tr->flags = READ_ONCE(sqe->timeout_flags);
+	if (tr->flags & IORING_TIMEOUT_UPDATE) {
+		if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
+			return -EINVAL;
+		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
+			return -EFAULT;
+	} else if (tr->flags) {
+		/* timeout removal doesn't support flags */
 		return -EINVAL;
+	}
 
-	req->timeout_rem.addr = READ_ONCE(sqe->addr);
 	return 0;
 }
 
@@ -5690,11 +5728,19 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
  */
 static int io_timeout_remove(struct io_kiocb *req)
 {
+	struct io_timeout_rem *tr = &req->timeout_rem;
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	spin_lock_irq(&ctx->completion_lock);
-	ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
+	if (req->timeout_rem.flags & IORING_TIMEOUT_UPDATE) {
+		enum hrtimer_mode mode = (tr->flags & IORING_TIMEOUT_ABS)
+					? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
+
+		ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
+	} else {
+		ret = io_timeout_cancel(ctx, tr->addr);
+	}
 
 	io_cqring_fill_event(req, ret);
 	io_commit_cqring(ctx);
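Taken together, the prep and remove paths define the userspace contract for the new flag: sqe->addr carries the user_data of the timeout to update, sqe->addr2 points at the new expiration (copied in via get_timespec64()), and IORING_TIMEOUT_UPDATE, optionally combined with IORING_TIMEOUT_ABS, goes in sqe->timeout_flags. Below is a minimal userspace sketch of driving this; the field assignments follow io_timeout_remove_prep() above, while the helper name and the liburing scaffolding (io_uring_get_sqe()/io_uring_submit()) are illustrative assumptions, not part of this patch:

```c
#include <liburing.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: re-arm a pending timeout identified by the
 * user_data of its original TIMEOUT SQE. Only the sqe field usage
 * below is dictated by the patch; everything else is illustrative. */
static int update_timeout(struct io_uring *ring, __u64 timeout_user_data,
			  struct __kernel_timespec *new_ts, int absolute)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT_REMOVE;
	/* user_data of the timeout request being updated */
	sqe->addr = timeout_user_data;
	/* new expiration; the kernel reads it with get_timespec64() */
	sqe->addr2 = (__u64)(uintptr_t)new_ts;
	sqe->timeout_flags = IORING_TIMEOUT_UPDATE;
	if (absolute)
		sqe->timeout_flags |= IORING_TIMEOUT_ABS;
	return io_uring_submit(ring);
}
```

On success the TIMEOUT_REMOVE request completes with res 0 and the original timeout stays queued with its new expiry (the off = 0 assignment in io_timeout_update() makes it a pure, sequence-independent timer); liburing later wrapped this same SQE layout as io_uring_prep_timeout_update().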