
Commit 5601591

Merge tag 'io_uring-5.13-2021-05-14' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
 "Just a few minor fixes/changes:

   - Fix issue with double free race for linked timeout completions

   - Fix reference issue with timeouts

   - Remove last few places that make SQPOLL special, since it's just an
     io thread now.

   - Bump maximum allowed registered buffers, as we don't allocate as
     much anymore"

* tag 'io_uring-5.13-2021-05-14' of git://git.kernel.dk/linux-block:
  io_uring: increase max number of reg buffers
  io_uring: further remove sqpoll limits on opcodes
  io_uring: fix ltout double free on completion race
  io_uring: fix link timeout refs
2 parents 41f035c + 489809e commit 5601591


fs/io_uring.c

Lines changed: 10 additions & 9 deletions
@@ -100,6 +100,8 @@
 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
 				 IORING_REGISTER_LAST + IORING_OP_LAST)

+#define IORING_MAX_REG_BUFFERS	(1U << 14)
+
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 				IOSQE_BUFFER_SELECT)
@@ -4035,7 +4037,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;

 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4150,7 +4152,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)

 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
@@ -5827,8 +5829,6 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 static int io_rsrc_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
-		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
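
The io_epoll_ctl_prep(), io_statx_prep() and io_rsrc_update_prep() hunks above drop the IORING_SETUP_SQPOLL rejection, so these opcodes behave on SQPOLL rings like on any other ring. As context, a minimal userspace sketch of issuing IORING_OP_STATX on an SQPOLL ring; it assumes liburing and a glibc that exposes struct statx, and the queue depth, idle timeout and path are arbitrary example values, not taken from this commit.

/*
 * Sketch: IORING_OP_STATX on an SQPOLL ring, which this merge allows.
 * Assumes liburing; ring size, sq_thread_idle and the path are examples.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring_params p;
	struct io_uring ring;
	struct statx stx;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel-side submission thread */
	p.sq_thread_idle = 2000;	/* ms before the SQ thread sleeps */

	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_statx(sqe, AT_FDCWD, "/etc/hostname", 0,
			    STATX_SIZE, &stx);
	io_uring_submit(&ring);

	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe);
	/* Before this change the prep handlers returned -EINVAL on SQPOLL. */
	printf("statx res=%d size=%llu\n", cqe->res,
	       (unsigned long long)stx.stx_size);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}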
@@ -6354,19 +6354,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	 * We don't expect the list to be empty, that will only happen if we
 	 * race with the completion of the linked work.
 	 */
-	if (prev && req_ref_inc_not_zero(prev))
+	if (prev) {
 		io_remove_next_linked(prev);
-	else
-		prev = NULL;
+		if (!req_ref_inc_not_zero(prev))
+			prev = NULL;
+	}
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);

 	if (prev) {
 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
 		io_put_req_deferred(prev, 1);
+		io_put_req_deferred(req, 1);
 	} else {
 		io_req_complete_post(req, -ETIME, 0);
 	}
-	io_put_req_deferred(req, 1);
 	return HRTIMER_NORESTART;
 }
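
The hunk above covers both timeout fixes from the merge message: prev is now unlinked whenever it is non-NULL and kept only if a reference can still be taken, and the timeout request's own deferred put moves into the cancellation branch, since io_req_complete_post() already drops req on the completion side and the old unconditional io_put_req_deferred(req, 1) could put it twice. For reference, the userspace pattern involved is a request with a linked timeout; the sketch below assumes liburing, and the function name, fd and 1-second timeout are illustrative only.

/*
 * Sketch: a read chained to IORING_OP_LINK_TIMEOUT via IOSQE_IO_LINK,
 * the pattern io_link_timeout_fn() fires for. Assumes liburing; the
 * helper name, fd and timeout value are arbitrary examples.
 */
#include <liburing.h>

int submit_read_with_timeout(struct io_uring *ring, int fd,
			     void *buf, unsigned len)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;

	/* The read request; IOSQE_IO_LINK chains the next SQE to it. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* The linked timeout: cancels the read if it outlives ts. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	/* Two CQEs come back: one for the read, one for the timeout. */
	return io_uring_submit(ring);
}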

@@ -8390,7 +8391,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,

 	if (ctx->user_bufs)
 		return -EBUSY;
-	if (!nr_args || nr_args > UIO_MAXIOV)
+	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
 		return -EINVAL;
 	ret = io_rsrc_node_switch_start(ctx);
 	if (ret)
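
With UIO_MAXIOV (1024) replaced by IORING_MAX_REG_BUFFERS, a single registration can now cover up to 1U << 14 = 16384 buffers. A minimal sketch of registering more than the old limit, assuming liburing; NR_BUFS and BUF_SZ are arbitrary example values, and the memlock limit still has to accommodate the pinned memory.

/*
 * Sketch: register more fixed buffers than the old UIO_MAXIOV cap.
 * Assumes liburing; NR_BUFS and BUF_SZ are example values.
 */
#include <liburing.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_BUFS	2048		/* > 1024, <= 16384 (1U << 14) on 5.13+ */
#define BUF_SZ	4096

int main(void)
{
	struct io_uring ring;
	struct iovec *iovs;
	int i, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	iovs = calloc(NR_BUFS, sizeof(*iovs));
	for (i = 0; i < NR_BUFS; i++) {
		iovs[i].iov_base = malloc(BUF_SZ);
		iovs[i].iov_len = BUF_SZ;
	}

	/* Fails with -EINVAL on kernels that still cap at UIO_MAXIOV. */
	ret = io_uring_register_buffers(&ring, iovs, NR_BUFS);
	printf("register_buffers: %d\n", ret);

	io_uring_queue_exit(&ring);
	return 0;
}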
