Skip to content

Commit cf01853

Browse files
committed
Merge tag 'io_uring-5.7-2020-05-01' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - Fix for statx not grabbing the file table, making AT_EMPTY_PATH fail

 - Cover a few cases where async poll can handle retry, eliminating the
   need for an async thread

 - fallback request busy/free fix (Bijan)

 - syzbot reported SQPOLL thread exit fix for non-preempt (Xiaoguang)

 - Fix extra put of req for sync_file_range (Pavel)

 - Always punt splice async. We'll improve this for 5.8, but wanted to
   eliminate the inode mutex lock from the non-blocking path for 5.7
   (Pavel)

* tag 'io_uring-5.7-2020-05-01' of git://git.kernel.dk/linux-block:
  io_uring: punt splice async because of inode mutex
  io_uring: check non-sync defer_list carefully
  io_uring: fix extra put in sync_file_range()
  io_uring: use cond_resched() in io_ring_ctx_wait_and_kill()
  io_uring: use proper references for fallback_req locking
  io_uring: only force async punt if poll based retry can't handle it
  io_uring: enable poll retry for any file with ->read_iter / ->write_iter
  io_uring: statx must grab the file table for valid fd
2 parents 052c467 + 2fb3e82 commit cf01853

File tree

1 file changed

+31
-27
lines changed

1 file changed

+31
-27
lines changed

fs/io_uring.c

Lines changed: 31 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -524,6 +524,7 @@ enum {
524524
REQ_F_OVERFLOW_BIT,
525525
REQ_F_POLLED_BIT,
526526
REQ_F_BUFFER_SELECTED_BIT,
527+
REQ_F_NO_FILE_TABLE_BIT,
527528

528529
/* not a real bit, just to check we're not overflowing the space */
529530
__REQ_F_LAST_BIT,
@@ -577,6 +578,8 @@ enum {
577578
REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
578579
/* buffer already selected */
579580
REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
581+
/* doesn't need file table for this request */
582+
REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
580583
};
581584

582585
struct async_poll {
@@ -799,6 +802,7 @@ static const struct io_op_def io_op_defs[] = {
799802
.needs_file = 1,
800803
.fd_non_neg = 1,
801804
.needs_fs = 1,
805+
.file_table = 1,
802806
},
803807
[IORING_OP_READ] = {
804808
.needs_mm = 1,
@@ -1291,7 +1295,7 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
12911295
struct io_kiocb *req;
12921296

12931297
req = ctx->fallback_req;
1294-
if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
1298+
if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
12951299
return req;
12961300

12971301
return NULL;
@@ -1378,7 +1382,7 @@ static void __io_free_req(struct io_kiocb *req)
13781382
if (likely(!io_is_fallback_req(req)))
13791383
kmem_cache_free(req_cachep, req);
13801384
else
1381-
clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
1385+
clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
13821386
}
13831387

13841388
struct req_batch {
@@ -2034,7 +2038,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
20342038
* any file. For now, just ensure that anything potentially problematic is done
20352039
* inline.
20362040
*/
2037-
static bool io_file_supports_async(struct file *file)
2041+
static bool io_file_supports_async(struct file *file, int rw)
20382042
{
20392043
umode_t mode = file_inode(file)->i_mode;
20402044

@@ -2043,7 +2047,13 @@ static bool io_file_supports_async(struct file *file)
20432047
if (S_ISREG(mode) && file->f_op != &io_uring_fops)
20442048
return true;
20452049

2046-
return false;
2050+
if (!(file->f_mode & FMODE_NOWAIT))
2051+
return false;
2052+
2053+
if (rw == READ)
2054+
return file->f_op->read_iter != NULL;
2055+
2056+
return file->f_op->write_iter != NULL;
20472057
}
20482058

20492059
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -2571,7 +2581,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
25712581
* If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
25722582
* we know to async punt it even if it was opened O_NONBLOCK
25732583
*/
2574-
if (force_nonblock && !io_file_supports_async(req->file))
2584+
if (force_nonblock && !io_file_supports_async(req->file, READ))
25752585
goto copy_iov;
25762586

25772587
iov_count = iov_iter_count(&iter);
@@ -2594,7 +2604,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
25942604
if (ret)
25952605
goto out_free;
25962606
/* any defer here is final, must blocking retry */
2597-
if (!(req->flags & REQ_F_NOWAIT))
2607+
if (!(req->flags & REQ_F_NOWAIT) &&
2608+
!file_can_poll(req->file))
25982609
req->flags |= REQ_F_MUST_PUNT;
25992610
return -EAGAIN;
26002611
}
@@ -2662,7 +2673,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
26622673
* If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
26632674
* we know to async punt it even if it was opened O_NONBLOCK
26642675
*/
2665-
if (force_nonblock && !io_file_supports_async(req->file))
2676+
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
26662677
goto copy_iov;
26672678

26682679
/* file path doesn't support NOWAIT for non-direct_IO */
@@ -2716,7 +2727,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
27162727
if (ret)
27172728
goto out_free;
27182729
/* any defer here is final, must blocking retry */
2719-
req->flags |= REQ_F_MUST_PUNT;
2730+
if (!file_can_poll(req->file))
2731+
req->flags |= REQ_F_MUST_PUNT;
27202732
return -EAGAIN;
27212733
}
27222734
}
@@ -2756,15 +2768,6 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
27562768
return 0;
27572769
}
27582770

2759-
static bool io_splice_punt(struct file *file)
2760-
{
2761-
if (get_pipe_info(file))
2762-
return false;
2763-
if (!io_file_supports_async(file))
2764-
return true;
2765-
return !(file->f_flags & O_NONBLOCK);
2766-
}
2767-
27682771
static int io_splice(struct io_kiocb *req, bool force_nonblock)
27692772
{
27702773
struct io_splice *sp = &req->splice;
@@ -2774,11 +2777,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
27742777
loff_t *poff_in, *poff_out;
27752778
long ret;
27762779

2777-
if (force_nonblock) {
2778-
if (io_splice_punt(in) || io_splice_punt(out))
2779-
return -EAGAIN;
2780-
flags |= SPLICE_F_NONBLOCK;
2781-
}
2780+
if (force_nonblock)
2781+
return -EAGAIN;
27822782

27832783
poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
27842784
poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
@@ -3355,8 +3355,12 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
33553355
struct kstat stat;
33563356
int ret;
33573357

3358-
if (force_nonblock)
3358+
if (force_nonblock) {
3359+
/* only need file table for an actual valid fd */
3360+
if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3361+
req->flags |= REQ_F_NO_FILE_TABLE;
33593362
return -EAGAIN;
3363+
}
33603364

33613365
if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
33623366
return -EINVAL;
@@ -3502,7 +3506,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
35023506
if (io_req_cancelled(req))
35033507
return;
35043508
__io_sync_file_range(req);
3505-
io_put_req(req); /* put submission ref */
3509+
io_steal_work(req, workptr);
35063510
}
35073511

35083512
static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
@@ -5015,7 +5019,7 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
50155019
int ret;
50165020

50175021
/* Still need defer if there is pending req in defer list. */
5018-
if (!req_need_defer(req) && list_empty(&ctx->defer_list))
5022+
if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
50195023
return 0;
50205024

50215025
if (!req->io && io_alloc_async_ctx(req))
@@ -5429,7 +5433,7 @@ static int io_grab_files(struct io_kiocb *req)
54295433
int ret = -EBADF;
54305434
struct io_ring_ctx *ctx = req->ctx;
54315435

5432-
if (req->work.files)
5436+
if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
54335437
return 0;
54345438
if (!ctx->ring_file)
54355439
return -EBADF;
@@ -7327,7 +7331,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
73277331
* it could cause shutdown to hang.
73287332
*/
73297333
while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
7330-
cpu_relax();
7334+
cond_resched();
73317335

73327336
io_kill_timeouts(ctx);
73337337
io_poll_remove_all(ctx);

0 commit comments

Comments
 (0)