@@ -17,6 +17,7 @@ struct io_timeout {
 	struct file			*file;
 	u32				off;
 	u32				target_seq;
+	u32				repeats;
 	struct list_head		list;
 	/* head of the link, used by linked timeouts only */
 	struct io_kiocb			*head;
@@ -37,8 +38,9 @@ struct io_timeout_rem {
 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
 {
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
+	struct io_timeout_data *data = req->async_data;
 
-	return !timeout->off;
+	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
 }
 
 static inline void io_put_req(struct io_kiocb *req)
@@ -49,6 +51,44 @@ static inline void io_put_req(struct io_kiocb *req)
 	}
 }
 
+static inline bool io_timeout_finish(struct io_timeout *timeout,
+				     struct io_timeout_data *data)
+{
+	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
+		return true;
+
+	if (!timeout->off || (timeout->repeats && --timeout->repeats))
+		return false;
+
+	return true;
+}
+
+static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
+
+static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
+{
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
+	struct io_timeout_data *data = req->async_data;
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!io_timeout_finish(timeout, data)) {
+		bool filled;
+		filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
+				    IORING_CQE_F_MORE, false);
+		if (filled) {
+			/* re-arm timer */
+			spin_lock_irq(&ctx->timeout_lock);
+			list_add(&timeout->list, ctx->timeout_list.prev);
+			data->timer.function = io_timeout_fn;
+			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
+			spin_unlock_irq(&ctx->timeout_lock);
+			return;
+		}
+	}
+
+	io_req_task_complete(req, ts);
+}
+
 static bool io_kill_timeout(struct io_kiocb *req, int status)
 	__must_hold(&req->ctx->timeout_lock)
 {
@@ -212,7 +252,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 		req_set_fail(req);
 
 	io_req_set_res(req, -ETIME, 0);
-	req->io_task_work.func = io_req_task_complete;
+	req->io_task_work.func = io_timeout_complete;
 	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
@@ -470,16 +510,27 @@ static int __io_timeout_prep(struct io_kiocb *req,
 		return -EINVAL;
 	flags = READ_ONCE(sqe->timeout_flags);
 	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
-		      IORING_TIMEOUT_ETIME_SUCCESS))
+		      IORING_TIMEOUT_ETIME_SUCCESS |
+		      IORING_TIMEOUT_MULTISHOT))
 		return -EINVAL;
 	/* more than one clock specified is invalid, obviously */
 	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
 		return -EINVAL;
+	/* multishot requests only make sense with rel values */
+	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
+		return -EINVAL;
 
 	INIT_LIST_HEAD(&timeout->list);
 	timeout->off = off;
 	if (unlikely(off && !req->ctx->off_timeout_used))
 		req->ctx->off_timeout_used = true;
+	/*
+	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
+	 * remaining nr
+	 */
+	timeout->repeats = 0;
+	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
+		timeout->repeats = off;
 
 	if (WARN_ON_ONCE(req_has_async_data(req)))
 		return -EFAULT;
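For context beyond the diff itself, here is a minimal userspace sketch (not part of this commit) of how the new flag might be exercised through liburing. It assumes a liburing with IORING_TIMEOUT_MULTISHOT exposed and uses only existing liburing calls; everything outside the diff (main, the queue depth, nr_ticks) is illustrative. The count argument of io_uring_prep_timeout fills sqe->off, which per the prep code above becomes timeout->repeats; each tick posts an -ETIME CQE carrying IORING_CQE_F_MORE until the final one.

/*
 * Hedged sketch, not from the patch: drive a multishot timeout that
 * fires once per second, three times, then terminates.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	unsigned nr_ticks = 3;	/* becomes sqe->off -> timeout->repeats */
	unsigned flags = 0;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* relative timespec is mandatory: MULTISHOT + ABS is -EINVAL above */
	io_uring_prep_timeout(sqe, &ts, nr_ticks, IORING_TIMEOUT_MULTISHOT);
	io_uring_submit(&ring);

	do {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		/* each tick completes with -ETIME; F_MORE means it re-armed */
		flags = cqe->flags;
		printf("res=%d more=%d\n", cqe->res,
		       !!(flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(&ring, cqe);
	} while (flags & IORING_CQE_F_MORE);

	io_uring_queue_exit(&ring);
	return 0;
}

Passing nr_ticks = 0 would instead repeat indefinitely (timeout->repeats stays 0, so io_timeout_finish never sees it hit zero), matching the "fixed nr of repeats" comment in the prep hunk.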