
Commit 1f68f31

Merge tag 'io_uring-5.8-2020-07-24' of git://git.kernel.dk/linux-block into master

Pull io_uring fixes from Jens Axboe:

 - Fix discrepancy in how sqe->flags are treated for a few requests,
   this makes it consistent (Daniele)

 - Ensure that poll driven retry works with double waitqueue poll users

 - Fix a missing io_req_init_async() (Pavel)

* tag 'io_uring-5.8-2020-07-24' of git://git.kernel.dk/linux-block:
  io_uring: missed req_init_async() for IOSQE_ASYNC
  io_uring: always allow drain/link/hardlink/async sqe flags
  io_uring: ensure double poll additions work with both request types

2 parents: 5a0b8af + 3e863ea
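As background for the third fix: IOSQE_ASYNC tells io_uring to skip inline submission and punt straight to the async workers, which is the io_queue_sqe() branch that was touching req->work.flags before initializing it. A minimal liburing sketch of driving that path (illustrative only; the helper name submit_async_poll and the fd choice are assumptions, not from the commit):

	#include <liburing.h>
	#include <poll.h>

	/* Hedged sketch: exercise the IOSQE_ASYNC path covered by the
	 * missed io_req_init_async(). submit_async_poll() is a made-up
	 * helper name, not an io_uring or liburing API. */
	static int submit_async_poll(int fd)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		ret = io_uring_queue_init(8, &ring, 0);
		if (ret < 0)
			return ret;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, fd, POLLIN);
		/* Forces io_queue_sqe() to skip inline submit and go async,
		 * the branch that now calls io_req_init_async(req) first. */
		io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);

		io_uring_submit(&ring);
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret)
			io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return ret;
	}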

1 file changed, 36 insertions(+), 25 deletions(-)

fs/io_uring.c

@@ -605,6 +605,7 @@ enum {
 
 struct async_poll {
 	struct io_poll_iocb	poll;
+	struct io_poll_iocb	*double_poll;
 	struct io_wq_work	work;
 };
 

@@ -4159,9 +4160,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
 		return false;
 	}
 
-static void io_poll_remove_double(struct io_kiocb *req)
+static void io_poll_remove_double(struct io_kiocb *req, void *data)
 {
-	struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
+	struct io_poll_iocb *poll = data;
 
 	lockdep_assert_held(&req->ctx->completion_lock);
 
@@ -4181,7 +4182,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	io_poll_remove_double(req);
+	io_poll_remove_double(req, req->io);
 	req->poll.done = true;
 	io_cqring_fill_event(req, error ? error : mangle_poll(mask));
 	io_commit_cqring(ctx);
@@ -4224,21 +4225,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
 			       int sync, void *key)
 {
 	struct io_kiocb *req = wait->private;
-	struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
+	struct io_poll_iocb *poll = req->apoll->double_poll;
 	__poll_t mask = key_to_poll(key);
 
 	/* for instances that support it check for an event match first: */
 	if (mask && !(mask & poll->events))
 		return 0;
 
-	if (req->poll.head) {
+	if (poll && poll->head) {
 		bool done;
 
-		spin_lock(&req->poll.head->lock);
-		done = list_empty(&req->poll.wait.entry);
+		spin_lock(&poll->head->lock);
+		done = list_empty(&poll->wait.entry);
 		if (!done)
-			list_del_init(&req->poll.wait.entry);
-		spin_unlock(&req->poll.head->lock);
+			list_del_init(&poll->wait.entry);
+		spin_unlock(&poll->head->lock);
 		if (!done)
 			__io_async_wake(req, poll, mask, io_poll_task_func);
 	}
@@ -4258,7 +4259,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
 }
 
 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
-			    struct wait_queue_head *head)
+			    struct wait_queue_head *head,
+			    struct io_poll_iocb **poll_ptr)
 {
 	struct io_kiocb *req = pt->req;
 
@@ -4269,7 +4271,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 	 */
 	if (unlikely(poll->head)) {
 		/* already have a 2nd entry, fail a third attempt */
-		if (req->io) {
+		if (*poll_ptr) {
 			pt->error = -EINVAL;
 			return;
 		}
@@ -4281,7 +4283,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 		io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
 		refcount_inc(&req->refs);
 		poll->wait.private = req;
-		req->io = (void *) poll;
+		*poll_ptr = poll;
 	}
 
 	pt->error = 0;
@@ -4293,8 +4295,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 			       struct poll_table_struct *p)
 {
 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+	struct async_poll *apoll = pt->req->apoll;
 
-	__io_queue_proc(&pt->req->apoll->poll, pt, head);
+	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
 static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
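For context: a second io_poll_iocb is needed when a file's ->poll() handler registers on more than one waitqueue, since each poll_wait() call re-enters __io_queue_proc(). A sketch of such a handler (hypothetical demo_dev driver, not from this commit):

	/* Hypothetical ->poll() with two waitqueues. The second poll_wait()
	 * re-enters __io_queue_proc(), which then allocates the entry stored
	 * via *poll_ptr (req->io or apoll->double_poll, depending on the
	 * request type). demo_dev and the helpers below are assumptions. */
	static __poll_t demo_poll(struct file *file, poll_table *wait)
	{
		struct demo_dev *dev = file->private_data;	/* hypothetical */
		__poll_t mask = 0;

		poll_wait(file, &dev->read_wq, wait);	/* first queue_proc call */
		poll_wait(file, &dev->write_wq, wait);	/* second: double poll */

		if (demo_data_ready(dev))		/* hypothetical helper */
			mask |= EPOLLIN | EPOLLRDNORM;
		if (demo_space_free(dev))		/* hypothetical helper */
			mask |= EPOLLOUT | EPOLLWRNORM;
		return mask;
	}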
@@ -4344,11 +4347,13 @@ static void io_async_task_func(struct callback_head *cb)
 		}
 	}
 
+	io_poll_remove_double(req, apoll->double_poll);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	/* restore ->work in case we need to retry again */
 	if (req->flags & REQ_F_WORK_INITIALIZED)
 		memcpy(&req->work, &apoll->work, sizeof(req->work));
+	kfree(apoll->double_poll);
 	kfree(apoll);
 
 	if (!canceled) {
@@ -4436,7 +4441,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
 	__poll_t mask, ret;
-	bool had_io;
 
 	if (!req->file || !file_can_poll(req->file))
 		return false;
@@ -4448,11 +4452,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 	if (unlikely(!apoll))
 		return false;
+	apoll->double_poll = NULL;
 
 	req->flags |= REQ_F_POLLED;
 	if (req->flags & REQ_F_WORK_INITIALIZED)
 		memcpy(&apoll->work, &req->work, sizeof(req->work));
-	had_io = req->io != NULL;
 
 	io_get_req_task(req);
 	req->apoll = apoll;
@@ -4470,13 +4474,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
 					io_async_wake);
 	if (ret) {
-		ipt.error = 0;
-		/* only remove double add if we did it here */
-		if (!had_io)
-			io_poll_remove_double(req);
+		io_poll_remove_double(req, apoll->double_poll);
 		spin_unlock_irq(&ctx->completion_lock);
 		if (req->flags & REQ_F_WORK_INITIALIZED)
 			memcpy(&req->work, &apoll->work, sizeof(req->work));
+		kfree(apoll->double_poll);
 		kfree(apoll);
 		return false;
 	}
@@ -4507,11 +4509,13 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 	bool do_complete;
 
 	if (req->opcode == IORING_OP_POLL_ADD) {
-		io_poll_remove_double(req);
+		io_poll_remove_double(req, req->io);
 		do_complete = __io_poll_remove_one(req, &req->poll);
 	} else {
 		struct async_poll *apoll = req->apoll;
 
+		io_poll_remove_double(req, apoll->double_poll);
+
 		/* non-poll requests have submit ref still */
 		do_complete = __io_poll_remove_one(req, &apoll->poll);
 		if (do_complete) {
@@ -4524,6 +4528,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 			if (req->flags & REQ_F_WORK_INITIALIZED)
 				memcpy(&req->work, &apoll->work,
 				       sizeof(req->work));
+			kfree(apoll->double_poll);
 			kfree(apoll);
 		}
 	}
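Condensing the convention the two hunks above settle on (a restatement of the diff, not new code): a pure POLL_ADD request keeps its second poll entry in req->io, while an internally armed poll keeps it in apoll->double_poll, and teardown must now free that extra allocation alongside the apoll itself.

	/* Restated from io_poll_remove_one() above: where the double poll
	 * entry lives for each request type. */
	if (req->opcode == IORING_OP_POLL_ADD) {
		io_poll_remove_double(req, req->io);	/* explicit poll request */
	} else {
		struct async_poll *apoll = req->apoll;	/* internally armed poll */

		io_poll_remove_double(req, apoll->double_poll);
		kfree(apoll->double_poll);	/* paired with every kfree(apoll) */
	}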
@@ -4624,7 +4629,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 {
 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
 
-	__io_queue_proc(&pt->req->poll, pt, head);
+	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
 }
 
 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -4732,7 +4737,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+		return -EINVAL;
+	if (sqe->ioprio || sqe->buf_index || sqe->len)
 		return -EINVAL;
 
 	req->timeout.addr = READ_ONCE(sqe->addr);
@@ -4910,8 +4917,9 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
-	    sqe->cancel_flags)
+	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+		return -EINVAL;
+	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
@@ -4929,7 +4937,9 @@ static int io_async_cancel(struct io_kiocb *req)
 static int io_files_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
-	if (sqe->flags || sqe->ioprio || sqe->rw_flags)
+	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+		return -EINVAL;
+	if (sqe->ioprio || sqe->rw_flags)
 		return -EINVAL;
 
 	req->files_update.offset = READ_ONCE(sqe->off);
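Net effect of the three prep changes above: IOSQE_IO_DRAIN, IOSQE_IO_LINK, IOSQE_IO_HARDLINK and IOSQE_ASYNC are no longer rejected wholesale for timeout-remove, async-cancel and files-update; only IOSQE_FIXED_FILE and IOSQE_BUFFER_SELECT remain invalid for them. A hedged liburing fragment of what this newly permits (target_data and the surrounding ring setup are assumed, not from the commit):

	/* Sketch: linking an async cancel behind another request. Before
	 * this change, IOSQE_IO_LINK made sqe->flags non-zero and
	 * io_async_cancel_prep() returned -EINVAL. target_data is a
	 * hypothetical user_data pointer for the request to cancel. */
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

	io_uring_prep_cancel(sqe, target_data, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);
	io_uring_submit(&ring);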
@@ -5720,6 +5730,7 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		 * Never try inline submit of IOSQE_ASYNC is set, go straight
 		 * to async execution.
 		 */
+		io_req_init_async(req);
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 		io_queue_async_work(req);
 	} else {
