 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
 					 IORING_REGISTER_LAST + IORING_OP_LAST)
 
+#define IORING_MAX_REG_BUFFERS	(1U << 14)
+
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 				IOSQE_BUFFER_SELECT)
@@ -4035,7 +4037,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 
 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4150,7 +4152,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
@@ -5827,8 +5829,6 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 static int io_rsrc_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
-		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
@@ -6354,19 +6354,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	 * We don't expect the list to be empty, that will only happen if we
 	 * race with the completion of the linked work.
 	 */
-	if (prev && req_ref_inc_not_zero(prev))
+	if (prev) {
 		io_remove_next_linked(prev);
-	else
-		prev = NULL;
+		if (!req_ref_inc_not_zero(prev))
+			prev = NULL;
+	}
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {
 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
 		io_put_req_deferred(prev, 1);
+		io_put_req_deferred(req, 1);
 	} else {
 		io_req_complete_post(req, -ETIME, 0);
 	}
-	io_put_req_deferred(req, 1);
 	return HRTIMER_NORESTART;
 }
 
@@ -8390,7 +8391,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 
 	if (ctx->user_bufs)
 		return -EBUSY;
-	if (!nr_args || nr_args > UIO_MAXIOV)
+	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
 		return -EINVAL;
 	ret = io_rsrc_node_switch_start(ctx);
 	if (ret)