
Commit 3af73b2

isilence authored and axboe committed
io_uring: don't derive close state from ->func
Relying on having a specific work.func is dangerous, even if an opcode handler set it itself. For example, io_wq_assign_next() can modify it.

io_close() sets a custom work.func to indicate that __close_fd_get_file() was already called. Fortunately, there are no bugs with io_wq_assign_next() and close yet. Still, play it safe and always be prepared to be called through io_wq_submit_work(): zero req->close.put_file in prep, and call __close_fd_get_file() only if it is still NULL.

Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
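For illustration only (not part of the commit): a minimal user-space C sketch of the idea the message describes, where the "step one already ran" state lives in the request itself as a NULL-checked pointer instead of being derived from which work function was installed, so the handler can safely be invoked a second time from the async path. All names here (fake_close, fake_close_prep) are hypothetical stand-ins for the io_uring structures and helpers, not kernel code.

/*
 * Minimal user-space sketch, not kernel code: the "have we already done
 * step one?" state is a NULL-checked pointer in the request, not the
 * identity of the installed work function.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_close {
	const char *put_file;	/* stands in for req->close.put_file */
};

/* "prep": zero the state so the handler can tell whether step one ran */
static void fake_close_prep(struct fake_close *c)
{
	c->put_file = NULL;
}

/*
 * "op handler": may run twice, once inline (nonblocking) and once from a
 * worker.  Step one only happens if it has not happened yet.
 */
static int fake_close(struct fake_close *c, int force_nonblock)
{
	if (!c->put_file) {
		c->put_file = "file-object";	/* ~ __close_fd_get_file() */
		printf("step 1: grabbed %s\n", c->put_file);
	}

	if (force_nonblock)
		return -1;	/* punt to async, like returning -EAGAIN */

	printf("step 2: closing %s\n", c->put_file);
	c->put_file = NULL;
	return 0;
}

int main(void)
{
	struct fake_close c;

	fake_close_prep(&c);
	if (fake_close(&c, 1) < 0)	/* inline attempt punts */
		fake_close(&c, 0);	/* async retry skips step one */
	return 0;
}

With this pattern, the nonblocking attempt may grab the file and punt; the later worker call sees put_file already set and goes straight to the close, which is what the hunk below does for io_close().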
1 parent a8c73c1 commit 3af73b2

File tree

1 file changed: +17 -33 lines changed


fs/io_uring.c

Lines changed: 17 additions & 33 deletions
@@ -3437,53 +3437,37 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	    req->close.fd == req->ctx->ring_fd)
 		return -EBADF;
 
+	req->close.put_file = NULL;
 	return 0;
 }
 
-/* only called when __close_fd_get_file() is done */
-static void __io_close_finish(struct io_kiocb *req)
-{
-	int ret;
-
-	ret = filp_close(req->close.put_file, req->work.files);
-	if (ret < 0)
-		req_set_fail_links(req);
-	io_cqring_add_event(req, ret);
-	fput(req->close.put_file);
-	io_put_req(req);
-}
-
-static void io_close_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	/* not cancellable, don't do io_req_cancelled() */
-	__io_close_finish(req);
-	io_steal_work(req, workptr);
-}
-
 static int io_close(struct io_kiocb *req, bool force_nonblock)
 {
+	struct io_close *close = &req->close;
 	int ret;
 
-	req->close.put_file = NULL;
-	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
-	if (ret < 0)
-		return (ret == -ENOENT) ? -EBADF : ret;
+	/* might be already done during nonblock submission */
+	if (!close->put_file) {
+		ret = __close_fd_get_file(close->fd, &close->put_file);
+		if (ret < 0)
+			return (ret == -ENOENT) ? -EBADF : ret;
+	}
 
 	/* if the file has a flush method, be safe and punt to async */
-	if (req->close.put_file->f_op->flush && force_nonblock) {
+	if (close->put_file->f_op->flush && force_nonblock) {
 		/* avoid grabbing files - we don't need the files */
 		req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
-		req->work.func = io_close_finish;
 		return -EAGAIN;
 	}
 
-	/*
-	 * No ->flush(), safely close from here and just punt the
-	 * fput() to async context.
-	 */
-	__io_close_finish(req);
+	/* No ->flush() or already async, safely close from here */
+	ret = filp_close(close->put_file, req->work.files);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	fput(close->put_file);
+	close->put_file = NULL;
+	io_put_req(req);
 	return 0;
 }
 