Commit 06b76d4

io_uring: standardize the prep methods
We currently have a mix of use cases. Most of the newer ones are pretty uniform, but we have some older ones that use different calling conventions. This is confusing.

For the opcodes that currently rely on the req->io->sqe copy saving them from reuse, add a request type struct in the io_kiocb command union to store the data they need.

Prepare for all opcodes having a standard prep method, so we can call it in a uniform fashion and outside of the opcode handler. This is in preparation for passing in the 'sqe' pointer, rather than storing it in the io_kiocb. Once we have uniform prep handlers, we can leave all the prep work to that part, and not even pass in the sqe to the opcode handler. This ensures that we don't reuse sqe data inadvertently.

Signed-off-by: Jens Axboe <[email protected]>
1 parent 26a6167 commit 06b76d4
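The convention every prep helper converges on is easiest to see in the io_prep_fsync() hunks below: return early if the sqe has already been consumed, copy the fields the handler needs into the request's own storage, then clear req->sqe so the sqe data cannot be read again. A minimal sketch of the pattern, condensed from those two hunks (the validation between them is elided):

	static int io_prep_fsync(struct io_kiocb *req)
	{
		const struct io_uring_sqe *sqe = req->sqe;

		if (!sqe)		/* prep already ran and consumed the sqe */
			return 0;
		if (!req->file)
			return -EBADF;

		/* ... flag/ctx validation elided ... */

		/* stash what the handler needs in req's own command union */
		req->sync.off = READ_ONCE(sqe->off);
		req->sync.len = READ_ONCE(sqe->len);

		req->sqe = NULL;	/* the sqe must not be dereferenced after this */
		return 0;
	}

The same "if (!sqe) return 0; ... req->sqe = NULL;" bracketing appears in every prep helper this commit touches, replacing the REQ_F_PREPPED flag.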

1 file changed: +63 -65

fs/io_uring.c

@@ -371,7 +371,6 @@ struct io_async_rw {
 };
 
 struct io_async_ctx {
-	struct io_uring_sqe	sqe;
 	union {
 		struct io_async_rw	rw;
 		struct io_async_msghdr	msg;
@@ -433,7 +432,6 @@ struct io_kiocb {
 #define REQ_F_INFLIGHT		16384	/* on inflight list */
 #define REQ_F_COMP_LOCKED	32768	/* completion under lock */
 #define REQ_F_HARDLINK		65536	/* doesn't sever on completion < 0 */
-#define REQ_F_PREPPED		131072	/* request already opcode prepared */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -1501,6 +1499,8 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
 	unsigned ioprio;
 	int ret;
 
+	if (!sqe)
+		return 0;
 	if (!req->file)
 		return -EBADF;
 
@@ -1552,6 +1552,7 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
 	/* we own ->private, reuse it for the buffer index */
 	req->rw.kiocb.private = (void *) (unsigned long)
 				READ_ONCE(req->sqe->buf_index);
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -1773,13 +1774,7 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 static int io_alloc_async_ctx(struct io_kiocb *req)
 {
 	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
-	if (req->io) {
-		memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
-		req->sqe = &req->io->sqe;
-		return 0;
-	}
-
-	return 1;
+	return req->io == NULL;
 }
 
 static void io_rw_async(struct io_wq_work **workptr)
@@ -1810,12 +1805,14 @@ static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
 {
 	ssize_t ret;
 
-	ret = io_prep_rw(req, force_nonblock);
-	if (ret)
-		return ret;
+	if (req->sqe) {
+		ret = io_prep_rw(req, force_nonblock);
+		if (ret)
+			return ret;
 
-	if (unlikely(!(req->file->f_mode & FMODE_READ)))
-		return -EBADF;
+		if (unlikely(!(req->file->f_mode & FMODE_READ)))
+			return -EBADF;
+	}
 
 	return io_import_iovec(READ, req, iovec, iter);
 }
@@ -1829,15 +1826,9 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t io_size, ret;
 
-	if (!req->io) {
-		ret = io_read_prep(req, &iovec, &iter, force_nonblock);
-		if (ret < 0)
-			return ret;
-	} else {
-		ret = io_import_iovec(READ, req, &iovec, &iter);
-		if (ret < 0)
-			return ret;
-	}
+	ret = io_read_prep(req, &iovec, &iter, force_nonblock);
+	if (ret < 0)
+		return ret;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
@@ -1901,12 +1892,14 @@ static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
 {
 	ssize_t ret;
 
-	ret = io_prep_rw(req, force_nonblock);
-	if (ret)
-		return ret;
+	if (req->sqe) {
+		ret = io_prep_rw(req, force_nonblock);
+		if (ret)
+			return ret;
 
-	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
-		return -EBADF;
+		if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
+			return -EBADF;
+	}
 
 	return io_import_iovec(WRITE, req, iovec, iter);
 }
@@ -1920,15 +1913,9 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t ret, io_size;
 
-	if (!req->io) {
-		ret = io_write_prep(req, &iovec, &iter, force_nonblock);
-		if (ret < 0)
-			return ret;
-	} else {
-		ret = io_import_iovec(WRITE, req, &iovec, &iter);
-		if (ret < 0)
-			return ret;
-	}
+	ret = io_write_prep(req, &iovec, &iter, force_nonblock);
+	if (ret < 0)
+		return ret;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
@@ -2013,7 +2000,7 @@ static int io_prep_fsync(struct io_kiocb *req)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!req->sqe)
 		return 0;
 	if (!req->file)
 		return -EBADF;
@@ -2029,7 +2016,7 @@ static int io_prep_fsync(struct io_kiocb *req)
 
 	req->sync.off = READ_ONCE(sqe->off);
 	req->sync.len = READ_ONCE(sqe->len);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2095,7 +2082,7 @@ static int io_prep_sfr(struct io_kiocb *req)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (!req->file)
 		return -EBADF;
@@ -2108,7 +2095,7 @@ static int io_prep_sfr(struct io_kiocb *req)
 	req->sync.off = READ_ONCE(sqe->off);
 	req->sync.len = READ_ONCE(sqe->len);
 	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2173,12 +2160,17 @@ static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
 #if defined(CONFIG_NET)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_sr_msg *sr = &req->sr_msg;
+	int ret;
 
+	if (!sqe)
+		return 0;
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	io->msg.iov = io->msg.fast_iov;
-	return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+	ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
 					&io->msg.iov);
+	req->sqe = NULL;
+	return ret;
 #else
 	return -EOPNOTSUPP;
 #endif
@@ -2253,12 +2245,18 @@ static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
 {
 #if defined(CONFIG_NET)
 	struct io_sr_msg *sr = &req->sr_msg;
+	int ret;
+
+	if (!req->sqe)
+		return 0;
 
 	sr->msg_flags = READ_ONCE(req->sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(req->sqe->addr));
 	io->msg.iov = io->msg.fast_iov;
-	return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+	ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
 					&io->msg.uaddr, &io->msg.iov);
+	req->sqe = NULL;
+	return ret;
 #else
 	return -EOPNOTSUPP;
 #endif
@@ -2336,7 +2334,7 @@ static int io_accept_prep(struct io_kiocb *req)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_accept *accept = &req->accept;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!req->sqe)
 		return 0;
 
 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
@@ -2347,7 +2345,7 @@ static int io_accept_prep(struct io_kiocb *req)
 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 	accept->flags = READ_ONCE(sqe->accept_flags);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 #else
 	return -EOPNOTSUPP;
@@ -2416,16 +2414,21 @@ static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
 {
 #if defined(CONFIG_NET)
 	const struct io_uring_sqe *sqe = req->sqe;
+	int ret;
 
+	if (!sqe)
+		return 0;
 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
 		return -EINVAL;
 
 	req->connect.addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	req->connect.addr_len = READ_ONCE(sqe->addr2);
-	return move_addr_to_kernel(req->connect.addr, req->connect.addr_len,
+	ret = move_addr_to_kernel(req->connect.addr, req->connect.addr_len,
 					&io->connect.address);
+	req->sqe = NULL;
+	return ret;
 #else
 	return -EOPNOTSUPP;
 #endif
@@ -2526,7 +2529,7 @@ static int io_poll_remove_prep(struct io_kiocb *req)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -2535,7 +2538,7 @@ static int io_poll_remove_prep(struct io_kiocb *req)
 		return -EINVAL;
 
 	req->poll.addr = READ_ONCE(sqe->addr);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2696,7 +2699,7 @@ static int io_poll_add_prep(struct io_kiocb *req)
 	struct io_poll_iocb *poll = &req->poll;
 	u16 events;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -2705,9 +2708,9 @@ static int io_poll_add_prep(struct io_kiocb *req)
 	if (!poll->file)
 		return -EBADF;
 
-	req->flags |= REQ_F_PREPPED;
 	events = READ_ONCE(sqe->poll_events);
 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2845,7 +2848,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -2857,7 +2860,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
 	if (req->timeout.flags)
 		return -EINVAL;
 
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2893,6 +2896,8 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 	struct io_timeout_data *data;
 	unsigned flags;
 
+	if (!sqe)
+		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
@@ -2921,6 +2926,7 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 	data->mode = HRTIMER_MODE_REL;
 
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2933,13 +2939,9 @@ static int io_timeout(struct io_kiocb *req)
 	unsigned span = 0;
 	int ret;
 
-	if (!req->io) {
-		if (io_alloc_async_ctx(req))
-			return -ENOMEM;
-		ret = io_timeout_prep(req, req->io, false);
-		if (ret)
-			return ret;
-	}
+	ret = io_timeout_prep(req, req->io, false);
+	if (ret)
+		return ret;
 	data = &req->io->timeout;
 
 	/*
@@ -3069,16 +3071,16 @@ static int io_async_cancel_prep(struct io_kiocb *req)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
 	    sqe->cancel_flags)
 		return -EINVAL;
 
-	req->flags |= REQ_F_PREPPED;
 	req->cancel.addr = READ_ONCE(sqe->addr);
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -3213,13 +3215,9 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
 		ret = io_nop(req);
 		break;
 	case IORING_OP_READV:
-		if (unlikely(req->sqe->buf_index))
-			return -EINVAL;
 		ret = io_read(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
-		if (unlikely(req->sqe->buf_index))
-			return -EINVAL;
 		ret = io_write(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_READ_FIXED:
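
For reference, the per-opcode request structs these prep helpers write into sit in a command union inside struct io_kiocb. A rough sketch reconstructed from the member names used in the hunks above; the exact member order, the type names not shown in the diff (io_rw, io_sync, io_connect, io_cancel, io_timeout), and the pre-existing file member are assumptions about this tree:

	struct io_kiocb {
		union {
			struct file		*file;		/* assumed pre-existing member */
			struct io_rw		rw;		/* req->rw.kiocb, io_prep_rw() */
			struct io_poll_iocb	poll;		/* io_poll_add_prep() */
			struct io_accept	accept;		/* io_accept_prep() */
			struct io_sync		sync;		/* io_prep_fsync()/io_prep_sfr() */
			struct io_sr_msg	sr_msg;		/* io_sendmsg_prep()/io_recvmsg_prep() */
			struct io_connect	connect;	/* io_connect_prep() */
			struct io_cancel	cancel;		/* io_async_cancel_prep() */
			struct io_timeout	timeout;	/* io_timeout_remove_prep() */
		};

		const struct io_uring_sqe	*sqe;	/* NULL once prep has consumed it */
		struct io_async_ctx		*io;	/* holds io->msg, io->connect, io->timeout */
		/* ... remaining fields unchanged by this commit ... */
	};

Because prep copies everything it needs into this union (or into req->io) and then clears req->sqe, a request that is retried asynchronously never goes back to the shared SQ ring entry, which may have been reused by then.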
