
Commit ef7b1a0

Merge tag 'io_uring-5.11-2021-01-24' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
 "Still need a final cancelation fix that isn't quite done done,
  expected in the next day or two. That said, this contains:

   - Wakeup fix for IOPOLL requests

   - SQPOLL split close op handling fix

   - Ensure that any use of io_uring fd itself is marked as inflight

   - Short non-regular file read fix (Pavel)

   - Fix up bad false positive warning (Pavel)

   - SQPOLL fixes (Pavel)

   - In-flight removal fix (Pavel)"

* tag 'io_uring-5.11-2021-01-24' of git://git.kernel.dk/linux-block:
  io_uring: account io_uring internal files as REQ_F_INFLIGHT
  io_uring: fix sleeping under spin in __io_clean_op
  io_uring: fix short read retries for non-reg files
  io_uring: fix SQPOLL IORING_OP_CLOSE cancelation state
  io_uring: fix skipping disabling sqo on exec
  io_uring: fix uring_flush in exit_files() warning
  io_uring: fix false positive sqo warning on flush
  io_uring: iopoll requests should also wake task ->in_idle state
2 parents a692a61 + 02a1367
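
One of the fixes above accounts any use of the io_uring fd itself as REQ_F_INFLIGHT. A minimal userspace sketch of the workload class that change covers, assuming liburing; the poll-on-own-fd pattern is illustrative, not taken from this commit:

/* Hypothetical reproducer-style sketch: a request whose target file is
 * the ring's own fd. After this merge such requests are accounted as
 * REQ_F_INFLIGHT, so task exit can find and cancel them. */
#include <liburing.h>
#include <poll.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        /* the target fd is the io_uring file itself */
        io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
        io_uring_submit(&ring);

        /* tearing down with the poll still pending exercises the
         * cancelation path that the inflight accounting fixes */
        io_uring_queue_exit(&ring);
        return 0;
}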

File tree: 1 file changed (+47, -20)

fs/io_uring.c

Lines changed: 47 additions & 20 deletions
@@ -1025,6 +1025,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                              const struct iovec *fast_iov,
                              struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -1048,8 +1049,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-        if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
-                          REQ_F_INFLIGHT))
+        if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
                 __io_clean_op(req);
 }
 
@@ -1075,8 +1075,11 @@ static bool io_match_task(struct io_kiocb *head,
                 return true;
 
         io_for_each_link(req, head) {
-                if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-                    (req->work.flags & IO_WQ_WORK_FILES) &&
+                if (!(req->flags & REQ_F_WORK_INITIALIZED))
+                        continue;
+                if (req->file && req->file->f_op == &io_uring_fops)
+                        return true;
+                if ((req->work.flags & IO_WQ_WORK_FILES) &&
                     req->work.identity->files == files)
                         return true;
         }
@@ -1394,6 +1397,8 @@ static void io_req_clean_work(struct io_kiocb *req)
                 free_fs_struct(fs);
                 req->work.flags &= ~IO_WQ_WORK_FS;
         }
+        if (req->flags & REQ_F_INFLIGHT)
+                io_req_drop_files(req);
 
         io_put_identity(req->task->io_uring, req);
 }
@@ -1503,11 +1508,14 @@ static bool io_grab_identity(struct io_kiocb *req)
                         return false;
                 atomic_inc(&id->files->count);
                 get_nsproxy(id->nsproxy);
-                req->flags |= REQ_F_INFLIGHT;
 
-                spin_lock_irq(&ctx->inflight_lock);
-                list_add(&req->inflight_entry, &ctx->inflight_list);
-                spin_unlock_irq(&ctx->inflight_lock);
+                if (!(req->flags & REQ_F_INFLIGHT)) {
+                        req->flags |= REQ_F_INFLIGHT;
+
+                        spin_lock_irq(&ctx->inflight_lock);
+                        list_add(&req->inflight_entry, &ctx->inflight_list);
+                        spin_unlock_irq(&ctx->inflight_lock);
+                }
                 req->work.flags |= IO_WQ_WORK_FILES;
         }
         if (!(req->work.flags & IO_WQ_WORK_MM) &&
@@ -2270,6 +2278,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
                 struct io_uring_task *tctx = rb->task->io_uring;
 
                 percpu_counter_sub(&tctx->inflight, rb->task_refs);
+                if (atomic_read(&tctx->in_idle))
+                        wake_up(&tctx->wait);
                 put_task_struct_many(rb->task, rb->task_refs);
                 rb->task = NULL;
         }
@@ -2288,6 +2298,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
                         struct io_uring_task *tctx = rb->task->io_uring;
 
                         percpu_counter_sub(&tctx->inflight, rb->task_refs);
+                        if (atomic_read(&tctx->in_idle))
+                                wake_up(&tctx->wait);
                         put_task_struct_many(rb->task, rb->task_refs);
                 }
                 rb->task = req->task;
@@ -3548,7 +3560,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 
         /* read it all, or we did blocking attempt. no retry. */
         if (!iov_iter_count(iter) || !force_nonblock ||
-            (req->file->f_flags & O_NONBLOCK))
+            (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
                 goto done;
 
         io_size -= ret;
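
This hunk stops short-read retries when the file is not regular (!REQ_F_ISREG): for a pipe or socket, a short read carries no promise that the rest of the buffer is coming, so retrying could stall waiting for bytes that never arrive. A minimal sketch of the affected case, assuming liburing; the pipe and buffer sizes are illustrative:

/* Hypothetical sketch: short read from a pipe (a non-regular file).
 * With this fix the request completes with the 5 bytes available
 * instead of being retried for the full buffer. */
#include <liburing.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int fds[2];
        char buf[4096];

        if (pipe(fds) || io_uring_queue_init(8, &ring, 0) < 0)
                return 1;
        if (write(fds[1], "short", 5) != 5)     /* only 5 bytes available */
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
        io_uring_submit(&ring);

        io_uring_wait_cqe(&ring, &cqe);
        /* expect cqe->res == 5, not a stalled retry for 4096 bytes */
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}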
@@ -4468,7 +4480,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
          * io_wq_work.flags, so initialize io_wq_work firstly.
          */
         io_req_init_async(req);
-        req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 
         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                 return -EINVAL;
@@ -4501,6 +4512,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
 
         /* if the file has a flush method, be safe and punt to async */
         if (close->put_file->f_op->flush && force_nonblock) {
+                /* not safe to cancel at this point */
+                req->work.flags |= IO_WQ_WORK_NO_CANCEL;
                 /* was never set, but play safe */
                 req->flags &= ~REQ_F_NOWAIT;
                 /* avoid grabbing files - we don't need the files */
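
Together with the io_close_prep() hunk above, this moves the IO_WQ_WORK_NO_CANCEL marking from prep time to the point where the close is actually punted to async context, so a close that is queued but not yet running stays cancelable. A sketch of issuing IORING_OP_CLOSE from userspace, assuming liburing; the SQPOLL flag and /dev/null target are illustrative, and SQPOLL required privileges on kernels of this era:

/* Hypothetical sketch: IORING_OP_CLOSE submitted via an SQPOLL ring,
 * the combination whose cancelation state this merge fixes. */
#include <liburing.h>
#include <fcntl.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        int fd;

        if (io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL) < 0)
                return 1;               /* SQPOLL may need CAP_SYS_ADMIN */

        fd = open("/dev/null", O_RDONLY);
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_close(sqe, fd);   /* close runs on the sq thread */
        io_uring_submit(&ring);

        io_uring_queue_exit(&ring);
        return 0;
}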
@@ -6157,8 +6170,10 @@ static void io_req_drop_files(struct io_kiocb *req)
         struct io_uring_task *tctx = req->task->io_uring;
         unsigned long flags;
 
-        put_files_struct(req->work.identity->files);
-        put_nsproxy(req->work.identity->nsproxy);
+        if (req->work.flags & IO_WQ_WORK_FILES) {
+                put_files_struct(req->work.identity->files);
+                put_nsproxy(req->work.identity->nsproxy);
+        }
         spin_lock_irqsave(&ctx->inflight_lock, flags);
         list_del(&req->inflight_entry);
         spin_unlock_irqrestore(&ctx->inflight_lock, flags);
@@ -6225,9 +6240,6 @@ static void __io_clean_op(struct io_kiocb *req)
                 }
                 req->flags &= ~REQ_F_NEED_CLEANUP;
         }
-
-        if (req->flags & REQ_F_INFLIGHT)
-                io_req_drop_files(req);
 }
 
 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6446,6 +6458,15 @@ static struct file *io_file_get(struct io_submit_state *state,
                 file = __io_file_get(state, fd);
         }
 
+        if (file && file->f_op == &io_uring_fops) {
+                io_req_init_async(req);
+                req->flags |= REQ_F_INFLIGHT;
+
+                spin_lock_irq(&ctx->inflight_lock);
+                list_add(&req->inflight_entry, &ctx->inflight_list);
+                spin_unlock_irq(&ctx->inflight_lock);
+        }
+
         return file;
 }
 
@@ -8856,8 +8877,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 
                 spin_lock_irq(&ctx->inflight_lock);
                 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-                        if (req->task != task ||
-                            req->work.identity->files != files)
+                        if (!io_match_task(req, task, files))
                                 continue;
                         found = true;
                         break;
@@ -8874,6 +8894,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                 io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
                 io_poll_remove_all(ctx, task, files);
                 io_kill_timeouts(ctx, task, files);
+                io_cqring_overflow_flush(ctx, true, task, files);
                 /* cancellations _may_ trigger task work */
                 io_run_task_work();
                 schedule();
@@ -8914,8 +8935,6 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 {
-        WARN_ON_ONCE(ctx->sqo_task != current);
-
         mutex_lock(&ctx->uring_lock);
         ctx->sqo_dead = 1;
         mutex_unlock(&ctx->uring_lock);
@@ -8937,6 +8956,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
         if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
                 /* for SQPOLL only sqo_task has task notes */
+                WARN_ON_ONCE(ctx->sqo_task != current);
                 io_disable_sqo_submit(ctx);
                 task = ctx->sq_data->thread;
                 atomic_inc(&task->io_uring->in_idle);
@@ -9082,6 +9102,10 @@ void __io_uring_task_cancel(void)
         /* make sure overflow events are dropped */
         atomic_inc(&tctx->in_idle);
 
+        /* trigger io_disable_sqo_submit() */
+        if (tctx->sqpoll)
+                __io_uring_files_cancel(NULL);
+
         do {
                 /* read completions before cancelations */
                 inflight = tctx_inflight(tctx);
@@ -9128,7 +9152,10 @@ static int io_uring_flush(struct file *file, void *data)
 
         if (ctx->flags & IORING_SETUP_SQPOLL) {
                 /* there is only one file note, which is owned by sqo_task */
-                WARN_ON_ONCE((ctx->sqo_task == current) ==
+                WARN_ON_ONCE(ctx->sqo_task != current &&
+                             xa_load(&tctx->xa, (unsigned long)file));
+                /* sqo_dead check is for when this happens after cancellation */
+                WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
                              !xa_load(&tctx->xa, (unsigned long)file));
 
                 io_disable_sqo_submit(ctx);
