Commit ac45abc

isilence authored and axboe committed
io_uring: remove custom ->func handlers
In preparation for getting rid of work.func, this removes almost all custom instances of it, leaving only io_wq_submit_work() and io_link_work_cb(); the latter will be dealt with later. Nothing fancy: just routinely remove each *_finish() function and inline what is left, e.g. remove io_fsync_finish() and inline __io_fsync() into io_fsync(). As no users of io_req_cancelled() are left, delete it as well.

The patch adds an extra switch lookup on a cold-ish path, but that is outweighed by the nice diffstat and the other benefits of the following patches.

Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 3af73b2 commit ac45abc
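
The removed pattern is identical in every case: an operation that must block installed a custom work handler (req->work.func = io_*_finish) and returned -EAGAIN, and each *_finish() handler repeated the io_req_cancelled() check before redoing the work. After this patch, the issue function itself returns -EAGAIN and the generic worker re-invokes it through the opcode switch. Below is a toy userspace model of the two shapes; all names in it (struct request, old_style_finish(), do_sync_op(), worker_run()) are invented for illustration and are not kernel interfaces.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct io_kiocb; every name here is made up. */
struct request {
	int opcode;
	bool cancelled;
};

/* OLD shape: each op had a custom work handler ("->func") that
 * re-checked cancellation itself before doing the blocking work. */
static void old_style_finish(struct request *req)
{
	if (req->cancelled) {			/* was io_req_cancelled() */
		printf("completed with -ECANCELED\n");
		return;
	}
	printf("blocking op %d done (old path)\n", req->opcode);
}

/* NEW shape: one issue function per opcode that simply refuses to
 * run in a nonblocking context. */
static int do_sync_op(struct request *req, bool force_nonblock)
{
	if (force_nonblock)
		return -EAGAIN;			/* punt to the worker */
	printf("blocking op %d done (new path)\n", req->opcode);
	return 0;
}

/* Generic worker: one opcode switch replaces all the custom ->func
 * pointers -- the "extra switch lookup on a cold-ish path" from the
 * commit message -- and cancellation is checked once, centrally. */
static void worker_run(struct request *req)
{
	if (req->cancelled) {
		printf("completed with -ECANCELED\n");
		return;
	}
	switch (req->opcode) {
	case 0:
		do_sync_op(req, false);		/* now allowed to block */
		break;
	}
}

int main(void)
{
	struct request req = { .opcode = 0, .cancelled = false };

	/* Old flow: the nonblocking attempt would set a custom handler
	 * and punt; the worker then called it directly. */
	old_style_finish(&req);

	/* New flow: the same issue function runs twice -- first
	 * nonblocking, then from the worker via the opcode switch. */
	if (do_sync_op(&req, true) == -EAGAIN)
		worker_run(&req);
	return 0;
}

The trade-off named in the commit message is visible here: the punted (cold) path now pays one switch lookup, and in exchange every per-op handler and its duplicated cancellation check disappears.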

1 file changed: 27 additions, 112 deletions

fs/io_uring.c

@@ -2898,77 +2898,25 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static bool io_req_cancelled(struct io_kiocb *req)
-{
-	if (req->work.flags & IO_WQ_WORK_CANCEL) {
-		req_set_fail_links(req);
-		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
-		return true;
-	}
-
-	return false;
-}
-
-static void __io_fsync(struct io_kiocb *req)
+static int io_fsync(struct io_kiocb *req, bool force_nonblock)
 {
 	loff_t end = req->sync.off + req->sync.len;
 	int ret;
 
+	/* fsync always requires a blocking context */
+	if (force_nonblock)
+		return -EAGAIN;
+
 	ret = vfs_fsync_range(req->file, req->sync.off,
 				end > 0 ? end : LLONG_MAX,
 				req->sync.flags & IORING_FSYNC_DATASYNC);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
-}
-
-static void io_fsync_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_fsync(req);
-	io_steal_work(req, workptr);
-}
-
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
-{
-	/* fsync always requires a blocking context */
-	if (force_nonblock) {
-		req->work.func = io_fsync_finish;
-		return -EAGAIN;
-	}
-	__io_fsync(req);
 	return 0;
 }
 
-static void __io_fallocate(struct io_kiocb *req)
-{
-	int ret;
-
-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
-	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
-				req->sync.len);
-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-	if (ret < 0)
-		req_set_fail_links(req);
-	io_cqring_add_event(req, ret);
-	io_put_req(req);
-}
-
-static void io_fallocate_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_fallocate(req);
-	io_steal_work(req, workptr);
-}
-
 static int io_fallocate_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
@@ -2986,13 +2934,20 @@ static int io_fallocate_prep(struct io_kiocb *req,
 
 static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
 {
+	int ret;
+
 	/* fallocate always requiring blocking context */
-	if (force_nonblock) {
-		req->work.func = io_fallocate_finish;
+	if (force_nonblock)
 		return -EAGAIN;
-	}
 
-	__io_fallocate(req);
+	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
+	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+				req->sync.len);
+	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req(req);
 	return 0;
 }
 
@@ -3489,38 +3444,20 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static void __io_sync_file_range(struct io_kiocb *req)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
 {
 	int ret;
 
+	/* sync_file_range always requires a blocking context */
+	if (force_nonblock)
+		return -EAGAIN;
+
 	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
 				req->sync.flags);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
-}
-
-
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_sync_file_range(req);
-	io_steal_work(req, workptr);
-}
-
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
-{
-	/* sync_file_range always requires a blocking context */
-	if (force_nonblock) {
-		req->work.func = io_sync_file_range_finish;
-		return -EAGAIN;
-	}
-
-	__io_sync_file_range(req);
 	return 0;
 }
 
@@ -3942,49 +3879,27 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static int __io_accept(struct io_kiocb *req, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock)
 {
 	struct io_accept *accept = &req->accept;
-	unsigned file_flags;
+	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
 	int ret;
 
-	file_flags = force_nonblock ? O_NONBLOCK : 0;
 	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
 					accept->addr_len, accept->flags,
 					accept->nofile);
 	if (ret == -EAGAIN && force_nonblock)
 		return -EAGAIN;
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
-	if (ret < 0)
+	if (ret < 0) {
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
 		req_set_fail_links(req);
+	}
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
 	return 0;
 }
 
-static void io_accept_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_accept(req, false);
-	io_steal_work(req, workptr);
-}
-
-static int io_accept(struct io_kiocb *req, bool force_nonblock)
-{
-	int ret;
-
-	ret = __io_accept(req, force_nonblock);
-	if (ret == -EAGAIN && force_nonblock) {
-		req->work.func = io_accept_finish;
-		return -EAGAIN;
-	}
-	return 0;
-}
-
 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_connect *conn = &req->connect;
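
With io_fsync(), io_fallocate(), and io_sync_file_range() now unconditionally returning -EAGAIN under force_nonblock, these opcodes are always completed by an io-wq worker, and userspace behavior is unchanged. For reference, a minimal liburing program exercising the fsync path could look like the following; it assumes liburing is installed (link with -luring), and the file name testfile is arbitrary.

/* Submit one IORING_OP_FSYNC and wait for its completion. After this
 * commit, the kernel serves every fsync SQE from a blocking io-wq
 * worker; from userspace it is indistinguishable from before. */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd;

	fd = open("testfile", O_WRONLY | O_CREAT, 0644);
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	if (write(fd, "data", 4) != 4)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("fsync completed: res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}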
