Skip to content

Commit ef4ff58

Browse files
isilence authored and axboe committed
io_uring: move all request init code in one place
Requests initialisation is scattered across several functions, namely io_init_req(), io_submit_sqes(), io_submit_sqe(). Put it in io_init_req() for better data locality and code clarity. Signed-off-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent dea3b49 commit ef4ff58

File tree

1 file changed

+52
-52
lines changed

1 file changed

+52
-52
lines changed

fs/io_uring.c

Lines changed: 52 additions & 52 deletions
Original file line number | Diff line number | Diff line change
@@ -5607,44 +5607,11 @@ static inline void io_queue_link_head(struct io_kiocb *req)
56075607
io_queue_sqe(req, NULL);
56085608
}
56095609

5610-
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5611-
IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5612-
IOSQE_BUFFER_SELECT)
5613-
56145610
static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
56155611
struct io_submit_state *state, struct io_kiocb **link)
56165612
{
56175613
struct io_ring_ctx *ctx = req->ctx;
5618-
unsigned int sqe_flags;
5619-
int ret, id, fd;
5620-
5621-
sqe_flags = READ_ONCE(sqe->flags);
5622-
5623-
/* enforce forwards compatibility on users */
5624-
if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5625-
return -EINVAL;
5626-
5627-
if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5628-
!io_op_defs[req->opcode].buffer_select)
5629-
return -EOPNOTSUPP;
5630-
5631-
id = READ_ONCE(sqe->personality);
5632-
if (id) {
5633-
req->work.creds = idr_find(&ctx->personality_idr, id);
5634-
if (unlikely(!req->work.creds))
5635-
return -EINVAL;
5636-
get_cred(req->work.creds);
5637-
}
5638-
5639-
/* same numerical values with corresponding REQ_F_*, safe to copy */
5640-
req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
5641-
IOSQE_ASYNC | IOSQE_FIXED_FILE |
5642-
IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
5643-
5644-
fd = READ_ONCE(sqe->fd);
5645-
ret = io_req_set_file(state, req, fd, sqe_flags);
5646-
if (unlikely(ret))
5647-
return ret;
5614+
int ret;
56485615

56495616
/*
56505617
* If we already have a head request, queue this one for async
@@ -5663,7 +5630,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
56635630
* next after the link request. The last one is done via
56645631
* drain_next flag to persist the effect across calls.
56655632
*/
5666-
if (sqe_flags & IOSQE_IO_DRAIN) {
5633+
if (req->flags & REQ_F_IO_DRAIN) {
56675634
head->flags |= REQ_F_IO_DRAIN;
56685635
ctx->drain_next = 1;
56695636
}
@@ -5680,16 +5647,16 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
56805647
list_add_tail(&req->link_list, &head->link_list);
56815648

56825649
/* last request of a link, enqueue the link */
5683-
if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
5650+
if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
56845651
io_queue_link_head(head);
56855652
*link = NULL;
56865653
}
56875654
} else {
56885655
if (unlikely(ctx->drain_next)) {
56895656
req->flags |= REQ_F_IO_DRAIN;
5690-
req->ctx->drain_next = 0;
5657+
ctx->drain_next = 0;
56915658
}
5692-
if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
5659+
if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
56935660
req->flags |= REQ_F_LINK_HEAD;
56945661
INIT_LIST_HEAD(&req->link_list);
56955662

@@ -5779,9 +5746,17 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
57795746
ctx->cached_sq_head++;
57805747
}
57815748

5782-
static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5783-
const struct io_uring_sqe *sqe)
5749+
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5750+
IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5751+
IOSQE_BUFFER_SELECT)
5752+
5753+
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5754+
const struct io_uring_sqe *sqe,
5755+
struct io_submit_state *state, bool async)
57845756
{
5757+
unsigned int sqe_flags;
5758+
int id, fd;
5759+
57855760
/*
57865761
* All io need record the previous position, if LINK vs DARIN,
57875762
* it can be used to mark the position of the first IO in the
@@ -5798,7 +5773,42 @@ static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
57985773
refcount_set(&req->refs, 2);
57995774
req->task = NULL;
58005775
req->result = 0;
5776+
req->needs_fixed_file = async;
58015777
INIT_IO_WORK(&req->work, io_wq_submit_work);
5778+
5779+
if (unlikely(req->opcode >= IORING_OP_LAST))
5780+
return -EINVAL;
5781+
5782+
if (io_op_defs[req->opcode].needs_mm && !current->mm) {
5783+
if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
5784+
return -EFAULT;
5785+
use_mm(ctx->sqo_mm);
5786+
}
5787+
5788+
sqe_flags = READ_ONCE(sqe->flags);
5789+
/* enforce forwards compatibility on users */
5790+
if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5791+
return -EINVAL;
5792+
5793+
if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5794+
!io_op_defs[req->opcode].buffer_select)
5795+
return -EOPNOTSUPP;
5796+
5797+
id = READ_ONCE(sqe->personality);
5798+
if (id) {
5799+
req->work.creds = idr_find(&ctx->personality_idr, id);
5800+
if (unlikely(!req->work.creds))
5801+
return -EINVAL;
5802+
get_cred(req->work.creds);
5803+
}
5804+
5805+
/* same numerical values with corresponding REQ_F_*, safe to copy */
5806+
req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
5807+
IOSQE_ASYNC | IOSQE_FIXED_FILE |
5808+
IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
5809+
5810+
fd = READ_ONCE(sqe->fd);
5811+
return io_req_set_file(state, req, fd, sqe_flags);
58025812
}
58035813

58045814
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
@@ -5846,28 +5856,18 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
58465856
break;
58475857
}
58485858

5849-
io_init_req(ctx, req, sqe);
5859+
err = io_init_req(ctx, req, sqe, statep, async);
58505860
io_consume_sqe(ctx);
58515861
/* will complete beyond this point, count as submitted */
58525862
submitted++;
58535863

5854-
if (unlikely(req->opcode >= IORING_OP_LAST)) {
5855-
err = -EINVAL;
5864+
if (unlikely(err)) {
58565865
fail_req:
58575866
io_cqring_add_event(req, err);
58585867
io_double_put_req(req);
58595868
break;
58605869
}
58615870

5862-
if (io_op_defs[req->opcode].needs_mm && !current->mm) {
5863-
if (unlikely(!mmget_not_zero(ctx->sqo_mm))) {
5864-
err = -EFAULT;
5865-
goto fail_req;
5866-
}
5867-
use_mm(ctx->sqo_mm);
5868-
}
5869-
5870-
req->needs_fixed_file = async;
58715871
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
58725872
true, async);
58735873
err = io_submit_sqe(req, sqe, statep, &link);

0 commit comments

Comments
 (0)