
Commit f010505

io_uring: flush timeouts that should already have expired

marcelo-gonzalez authored and axboe committed

Right now io_flush_timeouts() checks if the current number of events is
equal to ->timeout.target_seq, but this will miss some timeouts if
there have been more than 1 event added since the last time they were
flushed (possible in io_submit_flush_completions(), for example). Fix
it by recording the last sequence at which timeouts were flushed so
that the number of events seen can be compared to the number of events
needed without overflow.

Signed-off-by: Marcelo Diop-Gonzalez <[email protected]>
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent: 06585c4
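
The heart of the fix is a wrap-safe window comparison: instead of testing the
target sequence for equality against the current sequence, both are re-based
against the sequence recorded at the last flush, so unsigned subtraction orders
them correctly even across u32 wraparound. Below is a minimal standalone sketch
of that check; the function and variable names are illustrative, not taken from
the kernel source.

#include <stdint.h>
#include <stdio.h>

/*
 * Wrap-safe check: has the event counter reached `target` since the
 * flush at `last_flush`? All arithmetic is modulo 2^32, so the result
 * stays correct across wraparound as long as fewer than 2^31 - 1
 * events have happened since `last_flush`.
 */
static int target_reached(uint32_t last_flush, uint32_t cur, uint32_t target)
{
        uint32_t events_needed = target - last_flush;
        uint32_t events_got = cur - last_flush;

        return events_got >= events_needed;
}

int main(void)
{
        /* The counter wraps: target and cur sit just past UINT32_MAX. */
        uint32_t last = 0xfffffffeu;
        uint32_t target = last + 3;     /* wraps around to 1 */
        uint32_t cur = last + 5;        /* wraps around to 3 */

        printf("equality: %d\n", cur == target);  /* 0: target stepped over */
        printf("windowed: %d\n", target_reached(last, cur, target));  /* 1 */
        return 0;
}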

1 file changed: 30 additions, 4 deletions

fs/io_uring.c

@@ -354,6 +354,7 @@ struct io_ring_ctx {
         unsigned                cq_entries;
         unsigned                cq_mask;
         atomic_t                cq_timeouts;
+        unsigned                cq_last_tm_flush;
         unsigned long           cq_check_overflow;
         struct wait_queue_head  cq_wait;
         struct fasync_struct    *cq_fasync;
@@ -1639,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-        while (!list_empty(&ctx->timeout_list)) {
+        u32 seq;
+
+        if (list_empty(&ctx->timeout_list))
+                return;
+
+        seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+        do {
+                u32 events_needed, events_got;
                 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
                                                 struct io_kiocb, timeout.list);
 
                 if (io_is_timeout_noseq(req))
                         break;
-                if (req->timeout.target_seq != ctx->cached_cq_tail
-                                        - atomic_read(&ctx->cq_timeouts))
+
+                /*
+                 * Since seq can easily wrap around over time, subtract
+                 * the last seq at which timeouts were flushed before comparing.
+                 * Assuming not more than 2^31-1 events have happened since,
+                 * these subtractions won't have wrapped, so we can check if
+                 * target is in [last_seq, current_seq] by comparing the two.
+                 */
+                events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+                events_got = seq - ctx->cq_last_tm_flush;
+                if (events_got < events_needed)
                         break;
 
                 list_del_init(&req->timeout.list);
                 io_kill_timeout(req);
-        }
+        } while (!list_empty(&ctx->timeout_list));
+
+        ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -5837,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
         tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
         req->timeout.target_seq = tail + off;
 
+        /* Update the last seq here in case io_flush_timeouts() hasn't.
+         * This is safe because ->completion_lock is held, and submissions
+         * and completions are never mixed in the same ->completion_lock section.
+         */
+        ctx->cq_last_tm_flush = tail;
+
         /*
          * Insertion sort, ensuring the first entry in the list is always
          * the one we need first.
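
For a concrete picture of the failure mode described in the commit message,
here is a hedged toy model of the flush loop; struct toy_timeout, flush_old(),
and flush_new() are invented stand-ins, not kernel code. When two completions
land between flushes, as can happen via io_submit_flush_completions(), the old
equality test steps over the target and the timeout never fires, while the
windowed test still catches it:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a pending timeout with a target sequence. */
struct toy_timeout {
        uint32_t target_seq;
        int fired;
};

/* Old behavior: fire only on exact equality with the current seq. */
static void flush_old(struct toy_timeout *t, uint32_t seq)
{
        if (!t->fired && t->target_seq == seq)
                t->fired = 1;
}

/* New behavior: fire once the window since the last flush covers the target. */
static void flush_new(struct toy_timeout *t, uint32_t *last_flush, uint32_t seq)
{
        if (!t->fired && (uint32_t)(seq - *last_flush) >=
                         (uint32_t)(t->target_seq - *last_flush))
                t->fired = 1;
        *last_flush = seq;
}

int main(void)
{
        struct toy_timeout a = { .target_seq = 3 }, b = { .target_seq = 3 };
        uint32_t last_flush = 0;

        /* Two completions land between flushes: seq jumps from 2 to 4. */
        flush_old(&a, 2);              /* 2 != 3: not yet  */
        flush_old(&a, 4);              /* 4 != 3: missed!  */
        flush_new(&b, &last_flush, 2); /* window [0,2]: no */
        flush_new(&b, &last_flush, 4); /* window [2,4]: yes */

        printf("old fired=%d, new fired=%d\n", a.fired, b.fired); /* 0, 1 */
        return 0;
}

The same windowed comparison is why the io_timeout() hunk refreshes
cq_last_tm_flush when arming a timeout: the window base must not lag more than
2^31 - 1 events behind the target for the unsigned subtractions to stay valid.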
