Commit 2d28390

Jens Axboe committed
io_uring: ensure deferred timeouts copy necessary data

If we defer a timeout, we should ensure that we copy the timespec when we have consumed the sqe. This is similar to commit f67676d for read/write requests. We already did this correctly for timeouts deferred as links, but do it generally and use the infrastructure added by commit 1a6b74f instead of having the timeout deferral use its own.

Signed-off-by: Jens Axboe <[email protected]>
1 parent 901e59b commit 2d28390
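
For context, a minimal userspace sketch (not part of this commit) of the pattern the fix protects: an IORING_OP_TIMEOUT SQE only carries a pointer to the caller's timespec in sqe->addr, so if the request is deferred and the kernel reads that pointer only at execution time, the memory behind it may already be reused. With this change, io_timeout_prep() copies the timespec into req->io->timeout as soon as the SQE is consumed. The liburing calls below are standard; the queue_drained_timeout() helper, the drain flag, and the stack-local timespec are illustrative assumptions, and a real reproducer would also need earlier requests in flight for the drain to defer behind.

/*
 * Illustrative only: submit a drained IORING_OP_TIMEOUT whose timespec
 * lives in short-lived stack memory.  Before this commit, a deferred
 * timeout dereferenced sqe->addr when it was finally executed; after it,
 * the timespec is copied into the request's async context at prep time,
 * so 'ts' no longer has to outlive the submission.
 */
#include <liburing.h>
#include <stdio.h>

static int queue_drained_timeout(struct io_uring *ring)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_timeout(sqe, &ts, 0, 0);	/* sqe->addr points at ts */
	sqe->flags |= IOSQE_IO_DRAIN;		/* defer behind earlier requests */
	/* after submit returns, ts goes out of scope; only a copy stays valid */
	return io_uring_submit(ring);
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	if (queue_drained_timeout(&ring) < 0) {
		io_uring_queue_exit(&ring);
		return 1;
	}
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("timeout completed, res = %d\n", cqe->res); /* -ETIME when it fires */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}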

File tree: fs/io_uring.c (1 file changed: +42, -41 lines)

fs/io_uring.c

Lines changed: 42 additions & 41 deletions
@@ -303,11 +303,6 @@ struct io_timeout_data {
 	u32 seq_offset;
 };
 
-struct io_timeout {
-	struct file *file;
-	struct io_timeout_data *data;
-};
-
 struct io_async_connect {
 	struct sockaddr_storage address;
 };
@@ -332,6 +327,7 @@ struct io_async_ctx {
 		struct io_async_rw rw;
 		struct io_async_msghdr msg;
 		struct io_async_connect connect;
+		struct io_timeout_data timeout;
 	};
 };
 
@@ -346,7 +342,6 @@ struct io_kiocb {
 		struct file *file;
 		struct kiocb rw;
 		struct io_poll_iocb poll;
-		struct io_timeout timeout;
 	};
 
 	const struct io_uring_sqe *sqe;
@@ -619,7 +614,7 @@ static void io_kill_timeout(struct io_kiocb *req)
 {
 	int ret;
 
-	ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret != -1) {
 		atomic_inc(&req->ctx->cq_timeouts);
 		list_del_init(&req->list);
@@ -877,8 +872,6 @@ static void __io_free_req(struct io_kiocb *req)
 		wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
-	if (req->flags & REQ_F_TIMEOUT)
-		kfree(req->timeout.data);
 	percpu_ref_put(&ctx->refs);
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -891,7 +884,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret != -1) {
 		io_cqring_fill_event(req, -ECANCELED);
 		io_commit_cqring(ctx);
@@ -2618,7 +2611,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 	if (ret == -ENOENT)
 		return ret;
 
-	ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret == -1)
 		return -EALREADY;
 
@@ -2660,7 +2653,8 @@ static int io_timeout_remove(struct io_kiocb *req,
 	return 0;
 }
 
-static int io_timeout_setup(struct io_kiocb *req)
+static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+			   bool is_timeout_link)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_timeout_data *data;
@@ -2670,15 +2664,14 @@ static int io_timeout_setup(struct io_kiocb *req)
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
 		return -EINVAL;
+	if (sqe->off && is_timeout_link)
+		return -EINVAL;
 	flags = READ_ONCE(sqe->timeout_flags);
 	if (flags & ~IORING_TIMEOUT_ABS)
 		return -EINVAL;
 
-	data = kzalloc(sizeof(struct io_timeout_data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
+	data = &io->timeout;
 	data->req = req;
-	req->timeout.data = data;
 	req->flags |= REQ_F_TIMEOUT;
 
 	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
@@ -2690,6 +2683,7 @@ static int io_timeout_setup(struct io_kiocb *req)
 		data->mode = HRTIMER_MODE_REL;
 
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+	req->io = io;
 	return 0;
 }
 
@@ -2698,13 +2692,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	unsigned count;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_timeout_data *data;
+	struct io_async_ctx *io;
 	struct list_head *entry;
 	unsigned span = 0;
-	int ret;
 
-	ret = io_timeout_setup(req);
-	if (ret)
-		return ret;
+	io = req->io;
+	if (!io) {
+		int ret;
+
+		io = kmalloc(sizeof(*io), GFP_KERNEL);
+		if (!io)
+			return -ENOMEM;
+		ret = io_timeout_prep(req, io, false);
+		if (ret) {
+			kfree(io);
+			return ret;
+		}
+	}
+	data = &req->io->timeout;
 
 	/*
 	 * sqe->off holds how many events that need to occur for this
@@ -2720,7 +2725,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 
 	req->sequence = ctx->cached_sq_head + count - 1;
-	req->timeout.data->seq_offset = count;
+	data->seq_offset = count;
 
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
@@ -2731,7 +2736,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
 		unsigned nxt_sq_head;
 		long long tmp, tmp_nxt;
-		u32 nxt_offset = nxt->timeout.data->seq_offset;
+		u32 nxt_offset = nxt->io->timeout.seq_offset;
 
 		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
 			continue;
@@ -2764,7 +2769,6 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	req->sequence -= span;
 add:
 	list_add(&req->list, entry);
-	data = req->timeout.data;
 	data->timer.function = io_timeout_fn;
 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -2872,6 +2876,10 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
 	case IORING_OP_CONNECT:
 		ret = io_connect_prep(req, io);
 		break;
+	case IORING_OP_TIMEOUT:
+		return io_timeout_prep(req, io, false);
+	case IORING_OP_LINK_TIMEOUT:
+		return io_timeout_prep(req, io, true);
 	default:
 		req->io = io;
 		return 0;
@@ -2899,17 +2907,18 @@ static int io_req_defer(struct io_kiocb *req)
 	if (!io)
 		return -EAGAIN;
 
+	ret = io_req_defer_prep(req, io);
+	if (ret < 0) {
+		kfree(io);
+		return ret;
+	}
+
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
-		kfree(io);
 		return 0;
 	}
 
-	ret = io_req_defer_prep(req, io);
-	if (ret < 0)
-		return ret;
-
 	trace_io_uring_defer(ctx, req, req->user_data);
 	list_add_tail(&req->list, &ctx->defer_list);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -3198,7 +3207,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 	 */
 	spin_lock_irq(&ctx->completion_lock);
 	if (!list_empty(&req->list)) {
-		struct io_timeout_data *data = req->timeout.data;
+		struct io_timeout_data *data = &req->io->timeout;
 
 		data->timer.function = io_link_timeout_fn;
 		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
@@ -3345,26 +3354,18 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 		if (req->sqe->flags & IOSQE_IO_DRAIN)
 			(*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
 
-		if (READ_ONCE(req->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
-			ret = io_timeout_setup(req);
-			/* common setup allows offset being set, we don't */
-			if (!ret && req->sqe->off)
-				ret = -EINVAL;
-			if (ret) {
-				prev->flags |= REQ_F_FAIL_LINK;
-				goto err_req;
-			}
-		}
-
 		io = kmalloc(sizeof(*io), GFP_KERNEL);
 		if (!io) {
 			ret = -EAGAIN;
 			goto err_req;
 		}
 
 		ret = io_req_defer_prep(req, io);
-		if (ret)
+		if (ret) {
+			kfree(io);
+			prev->flags |= REQ_F_FAIL_LINK;
 			goto err_req;
+		}
 		trace_io_uring_link(ctx, req, prev);
 		list_add_tail(&req->list, &prev->link_list);
 	} else if (req->sqe->flags & IOSQE_IO_LINK) {
