
Commit 4445656

Merge tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
 "A small collection of small fixes that should go into this release:

   - Two fixes for async request preparation (Pavel)

   - Busy clear fix for SQPOLL (Xiaoguang)

   - Don't use kiocb->private for O_DIRECT buf index, some file systems
     use it (Bijan)

   - Kill dead check in io_splice()

   - Ensure sqo_wait is initialized early

   - Cancel task_work if we fail adding to original process

   - Only add (IO)pollable requests to iopoll list, fixing a regression
     in this merge window"

* tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block:
  io_uring: reset -EBUSY error when io sq thread is waken up
  io_uring: don't add non-IO requests to iopoll pending list
  io_uring: don't use kiocb.private to store buf_index
  io_uring: cancel work if task_work_add() fails
  io_uring: remove dead check in io_splice()
  io_uring: fix FORCE_ASYNC req preparation
  io_uring: don't prepare DRAIN reqs twice
  io_uring: initialize ctx->sqo_wait earlier
2 parents db9f384 + d4ae271 commit 4445656
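For context on the buf_index change in the diff below: the value that used to be smuggled through kiocb.private is the fixed-buffer index (or buffer group ID) that userspace supplies in the SQE. The following is a minimal userspace sketch of a fixed-buffer read that carries such an index; it is not part of this commit, and assumes liburing is installed and a file named "testfile" exists:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* register one fixed buffer; its registration slot is buf_index 0 */
	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("testfile", O_RDONLY);	/* hypothetical input file */
	if (fd < 0)
		return 1;

	/* READ_FIXED: the final argument is the buf_index carried in the SQE */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}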

File tree

1 file changed: +34, -26 lines


fs/io_uring.c

Lines changed: 34 additions & 26 deletions
@@ -619,6 +619,8 @@ struct io_kiocb {
 	bool			needs_fixed_file;
 	u8			opcode;
 
+	u16			buf_index;
+
 	struct io_ring_ctx	*ctx;
 	struct list_head	list;
 	unsigned int		flags;
@@ -924,6 +926,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 
 	ctx->flags = p->flags;
+	init_waitqueue_head(&ctx->sqo_wait);
 	init_waitqueue_head(&ctx->cq_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->completions[0]);
@@ -2100,9 +2103,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
-	/* we own ->private, reuse it for the buffer index / buffer ID */
-	req->rw.kiocb.private = (void *) (unsigned long)
-					READ_ONCE(sqe->buf_index);
+	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
 
@@ -2145,15 +2146,15 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
 	struct io_mapped_ubuf *imu;
-	unsigned index, buf_index;
+	u16 index, buf_index;
 	size_t offset;
 	u64 buf_addr;
 
 	/* attempt to use fixed buffers without having provided iovecs */
 	if (unlikely(!ctx->user_bufs))
 		return -EFAULT;
 
-	buf_index = (unsigned long) req->rw.kiocb.private;
+	buf_index = req->buf_index;
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
 
@@ -2269,10 +2270,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
 					  bool needs_lock)
 {
 	struct io_buffer *kbuf;
-	int bgid;
+	u16 bgid;
 
 	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
-	bgid = (int) (unsigned long) req->rw.kiocb.private;
+	bgid = req->buf_index;
 	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
 	if (IS_ERR(kbuf))
 		return kbuf;
@@ -2363,7 +2364,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	}
 
 	/* buffer index only valid with fixed read/write, or buffer select */
-	if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
+	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
@@ -2771,11 +2772,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
-	if (sp->len) {
+	if (sp->len)
 		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-		if (force_nonblock && ret == -EAGAIN)
-			return -EAGAIN;
-	}
 
 	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
 	req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -4137,12 +4135,14 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	req->result = mask;
 	init_task_work(&req->task_work, func);
 	/*
-	 * If this fails, then the task is exiting. Punt to one of the io-wq
-	 * threads to ensure the work gets run, we can't always rely on exit
-	 * cancelation taking care of this.
+	 * If this fails, then the task is exiting. When a task exits, the
+	 * work gets canceled, so just cancel this request as well instead
+	 * of executing it. We can't safely execute it anyway, as we may not
+	 * have the needed state needed for it anyway.
 	 */
 	ret = task_work_add(tsk, &req->task_work, true);
 	if (unlikely(ret)) {
+		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
 		task_work_add(tsk, &req->task_work, true);
 	}
@@ -5013,12 +5013,13 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
 		return 0;
 
-	if (!req->io && io_alloc_async_ctx(req))
-		return -EAGAIN;
-
-	ret = io_req_defer_prep(req, sqe);
-	if (ret < 0)
-		return ret;
+	if (!req->io) {
+		if (io_alloc_async_ctx(req))
+			return -EAGAIN;
+		ret = io_req_defer_prep(req, sqe);
+		if (ret < 0)
+			return ret;
+	}
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -5305,7 +5306,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (ret)
 		return ret;
 
-	if (ctx->flags & IORING_SETUP_IOPOLL) {
+	/* If the op doesn't have a file, we're not polling for it */
+	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
 		const bool in_async = io_wq_current_is_worker();
 
 		if (req->result == -EAGAIN)
@@ -5606,9 +5608,15 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			io_double_put_req(req);
 		}
 	} else if (req->flags & REQ_F_FORCE_ASYNC) {
-		ret = io_req_defer_prep(req, sqe);
-		if (unlikely(ret < 0))
-			goto fail_req;
+		if (!req->io) {
+			ret = -EAGAIN;
+			if (io_alloc_async_ctx(req))
+				goto fail_req;
+			ret = io_req_defer_prep(req, sqe);
+			if (unlikely(ret < 0))
+				goto fail_req;
+		}
+
 		/*
 		 * Never try inline submit of IOSQE_ASYNC is set, go straight
 		 * to async execution.
@@ -6024,6 +6032,7 @@ static int io_sq_thread(void *data)
 				finish_wait(&ctx->sqo_wait, &wait);
 
 				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+				ret = 0;
 				continue;
 			}
 			finish_wait(&ctx->sqo_wait, &wait);
@@ -6837,7 +6846,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 {
 	int ret;
 
-	init_waitqueue_head(&ctx->sqo_wait);
 	mmgrab(current->mm);
 	ctx->sqo_mm = current->mm;

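The FORCE_ASYNC preparation fix above is exercised when userspace marks a request with IOSQE_ASYNC, which forces the request down the io-wq path (REQ_F_FORCE_ASYNC on the kernel side). A minimal sketch of such a submission; it is not part of this commit and assumes liburing's io_uring_prep_read() and io_uring_sqe_set_flags() helpers:

#include <liburing.h>

/* Submit a read that is forced down the async (io-wq) path via IOSQE_ASYNC,
 * the flag that makes the kernel take the REQ_F_FORCE_ASYNC branch. */
static int submit_forced_async_read(struct io_uring *ring, int fd,
				    void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_read(sqe, fd, buf, len, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
	return io_uring_submit(ring);
}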
0 commit comments
