Skip to content

Commit 74ce6ce

Browse files
committed
io_uring: check for need to re-wait in polled async handling
We added this for just the regular poll requests in commit a6ba632 ("io_uring: retry poll if we got woken with non-matching mask"); we should do the same for the poll handler used for pollable async requests. Move the re-wait check and arm into a helper, and call it from io_async_task_func() as well. Signed-off-by: Jens Axboe <[email protected]>
1 parent 8835758 commit 74ce6ce

File tree

1 file changed

+29
-14
lines changed

1 file changed

+29
-14
lines changed

fs/io_uring.c

Lines changed: 29 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4156,6 +4156,26 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
41564156
return 1;
41574157
}
41584158

4159+
/*
 * Re-check a poll request after a wakeup: if no result mask has been
 * recorded and the poll was not canceled, poll the file again and, if
 * there is still nothing to report, re-arm the wait queue entry so a
 * later wakeup can retry.
 *
 * Returns true if the request was re-armed — the caller must only drop
 * the lock and return; false if the request is ready to be completed.
 * Per the __acquires annotation, ->completion_lock is held on return in
 * both cases; the caller is responsible for releasing it.
 */
static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
	__acquires(&req->ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* No result yet and not canceled: re-poll the file, masked to the
	 * events this request cares about. Done before taking the lock,
	 * since vfs_poll() may sleep/schedule.
	 */
	if (!req->result && !READ_ONCE(poll->canceled)) {
		struct poll_table_struct pt = { ._key = poll->events };

		req->result = vfs_poll(req->file, &pt) & poll->events;
	}

	spin_lock_irq(&ctx->completion_lock);
	/* Re-check under the lock: still nothing and not canceled means we
	 * were woken with a non-matching mask — re-arm and wait again.
	 */
	if (!req->result && !READ_ONCE(poll->canceled)) {
		add_wait_queue(poll->head, &poll->wait);
		return true;
	}

	return false;
}
4178+
41594179
static void io_async_task_func(struct callback_head *cb)
41604180
{
41614181
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -4164,14 +4184,16 @@ static void io_async_task_func(struct callback_head *cb)
41644184

41654185
trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
41664186

4167-
WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry));
4168-
4169-
if (hash_hashed(&req->hash_node)) {
4170-
spin_lock_irq(&ctx->completion_lock);
4171-
hash_del(&req->hash_node);
4187+
if (io_poll_rewait(req, &apoll->poll)) {
41724188
spin_unlock_irq(&ctx->completion_lock);
4189+
return;
41734190
}
41744191

4192+
if (hash_hashed(&req->hash_node))
4193+
hash_del(&req->hash_node);
4194+
4195+
spin_unlock_irq(&ctx->completion_lock);
4196+
41754197
/* restore ->work in case we need to retry again */
41764198
memcpy(&req->work, &apoll->work, sizeof(req->work));
41774199

@@ -4436,18 +4458,11 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
44364458
struct io_ring_ctx *ctx = req->ctx;
44374459
struct io_poll_iocb *poll = &req->poll;
44384460

4439-
if (!req->result && !READ_ONCE(poll->canceled)) {
4440-
struct poll_table_struct pt = { ._key = poll->events };
4441-
4442-
req->result = vfs_poll(req->file, &pt) & poll->events;
4443-
}
4444-
4445-
spin_lock_irq(&ctx->completion_lock);
4446-
if (!req->result && !READ_ONCE(poll->canceled)) {
4447-
add_wait_queue(poll->head, &poll->wait);
4461+
if (io_poll_rewait(req, poll)) {
44484462
spin_unlock_irq(&ctx->completion_lock);
44494463
return;
44504464
}
4465+
44514466
hash_del(&req->hash_node);
44524467
io_poll_complete(req, req->result, 0);
44534468
req->flags |= REQ_F_COMP_LOCKED;

0 commit comments

Comments
 (0)