Commit 55d57b3

io_uring/poll: unify poll waitqueue entry and list removal
For some cases, the order in which the waitq entry list removal and the head write happen is important; for others it doesn't really matter. But it's somewhat confusing to have them spread out over the file. Abstract the nicely documented code out of io_pollfree_wake() into a helper, and use that helper consistently rather than having other call sites manually do the same thing. While at it, correct a function name in a comment as well.

Signed-off-by: Jens Axboe <[email protected]>
1 parent: a4c694b

1 file changed

io_uring/poll.c

Lines changed: 22 additions & 21 deletions
@@ -138,14 +138,32 @@ static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 }
 
+static void io_poll_remove_waitq(struct io_poll *poll)
+{
+	/*
+	 * If the waitqueue is being freed early but someone is already holds
+	 * ownership over it, we have to tear down the request as best we can.
+	 * That means immediately removing the request from its waitqueue and
+	 * preventing all further accesses to the waitqueue via the request.
+	 */
+	list_del_init(&poll->wait.entry);
+
+	/*
+	 * Careful: this *must* be the last step, since as soon as req->head is
+	 * NULL'ed out, the request can be completed and freed, since
+	 * io_poll_remove_entry() will no longer need to take the waitqueue
+	 * lock.
+	 */
+	smp_store_release(&poll->head, NULL);
+}
+
 static inline void io_poll_remove_entry(struct io_poll *poll)
 {
 	struct wait_queue_head *head = smp_load_acquire(&poll->head);
 
 	if (head) {
 		spin_lock_irq(&head->lock);
-		list_del_init(&poll->wait.entry);
-		poll->head = NULL;
+		io_poll_remove_waitq(poll);
 		spin_unlock_irq(&head->lock);
 	}
 }
@@ -368,23 +386,7 @@ static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
 	io_poll_mark_cancelled(req);
 	/* we have to kick tw in case it's not already */
 	io_poll_execute(req, 0);
-
-	/*
-	 * If the waitqueue is being freed early but someone is already
-	 * holds ownership over it, we have to tear down the request as
-	 * best we can. That means immediately removing the request from
-	 * its waitqueue and preventing all further accesses to the
-	 * waitqueue via the request.
-	 */
-	list_del_init(&poll->wait.entry);
-
-	/*
-	 * Careful: this *must* be the last step, since as soon
-	 * as req->head is NULL'ed out, the request can be
-	 * completed and freed, since aio_poll_complete_work()
-	 * will no longer need to take the waitqueue lock.
-	 */
-	smp_store_release(&poll->head, NULL);
+	io_poll_remove_waitq(poll);
 	return 1;
 }
 
@@ -413,8 +415,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 
 	/* optional, saves extra locking for removal in tw handler */
 	if (mask && poll->events & EPOLLONESHOT) {
-		list_del_init(&poll->wait.entry);
-		poll->head = NULL;
+		io_poll_remove_waitq(poll);
 		if (wqe_is_double(wait))
 			req->flags &= ~REQ_F_DOUBLE_POLL;
 		else
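The ordering the patch's comments insist on comes down to a release/acquire handoff: io_poll_remove_entry() only touches the waitqueue when an acquire load still observes a non-NULL poll->head, so the helper must unlink the entry first and only then publish the NULL head with release semantics. Below is a minimal, self-contained sketch of that pattern in userspace C, using C11 atomics in place of the kernel's smp_store_release()/smp_load_acquire(); the struct and function names are illustrative only and are not taken from io_uring.

/* Standalone illustration of the release/acquire handoff that the patch
 * centralizes in one helper.  All names here are made up for the example.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct wq { int unused; };

struct fake_poll {
	int on_list;                 /* stands in for poll->wait.entry   */
	_Atomic(struct wq *) head;   /* stands in for poll->head         */
};

/* Tear-down side: unlink first, then publish "no head" with release
 * semantics, so the unlink is visible before anyone treats the request
 * as free to complete. */
static void remove_waitq(struct fake_poll *p)
{
	p->on_list = 0;                               /* list_del_init() analogue     */
	atomic_store_explicit(&p->head, NULL,         /* smp_store_release() analogue */
			      memory_order_release);
}

/* Removal side: only if the acquire load still sees a head does it touch
 * the (possibly shared) waitqueue state. */
static void remove_entry(struct fake_poll *p)
{
	struct wq *head = atomic_load_explicit(&p->head, memory_order_acquire);

	if (head) {
		/* the real code takes head->lock here before unlinking */
		p->on_list = 0;
		atomic_store_explicit(&p->head, NULL, memory_order_release);
	}
}

int main(void)
{
	struct wq wq = { 0 };
	struct fake_poll p = { .on_list = 1 };

	atomic_init(&p.head, &wq);
	remove_waitq(&p);
	remove_entry(&p);   /* sees a NULL head, does nothing further */
	printf("on_list=%d head=%p\n", p.on_list, (void *)atomic_load(&p.head));
	return 0;
}

The sketch deliberately omits the waitqueue spinlock and the request lifetime handling the real code depends on; it only demonstrates why the NULL store has to come last.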
