
Commit f43de1f

isilence authored and axboe committed
io_uring: unite fill_cqe and the 32B version
We want just one function that will handle both normal cqes and 32B cqes.
Combine __io_fill_cqe_req() and __io_fill_cqe_req32(). It's still not
entirely correct yet, but it saves us from cases where we fill a CQE of
the wrong size.

Fixes: 76c68fb ("io_uring: enable CQE32")
Signed-off-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/8085c5b2f74141520f60decd45334f87e389b718.1655287457.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <[email protected]>
1 parent 91ef75a commit f43de1f
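For context, the big_cqe[] fields that the unified kernel function fills are what a CQE32-enabled ring reads back in userspace. Below is a minimal userspace sketch, assuming liburing 2.2 or later with IORING_SETUP_CQE32 support; it is illustrative only and not part of this commit. The NOP request is just a stand-in for an opcode that actually populates the extra fields.

/*
 * Minimal sketch (illustrative, not part of this commit): set up a
 * CQE32 ring and read the two extra u64s from big_cqe[].
 * Assumes liburing >= 2.2; build with -luring.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p = { .flags = IORING_SETUP_CQE32 };
	struct io_uring ring;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	/* NOP is just a placeholder; it completes with extra1/extra2 == 0. */
	io_uring_prep_nop(io_uring_get_sqe(&ring));
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/*
		 * With IORING_SETUP_CQE32 each CQE is 32 bytes; the extra
		 * payload lands in big_cqe[0] and big_cqe[1].
		 */
		printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
		       (unsigned long long)cqe->big_cqe[0],
		       (unsigned long long)cqe->big_cqe[1]);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}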

File tree

1 file changed: +42 -19 lines changed


fs/io_uring.c

Lines changed: 42 additions & 19 deletions
@@ -2469,21 +2469,48 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 {
 	struct io_uring_cqe *cqe;
 
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags, 0, 0);
+	if (!(ctx->flags & IORING_SETUP_CQE32)) {
+		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+					req->cqe.res, req->cqe.flags, 0, 0);
 
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
-		memcpy(cqe, &req->cqe, sizeof(*cqe));
-		return true;
+		/*
+		 * If we can't get a cq entry, userspace overflowed the
+		 * submission (by quite a lot). Increment the overflow count in
+		 * the ring.
+		 */
+		cqe = io_get_cqe(ctx);
+		if (likely(cqe)) {
+			memcpy(cqe, &req->cqe, sizeof(*cqe));
+			return true;
+		}
+
+		return io_cqring_event_overflow(ctx, req->cqe.user_data,
+						req->cqe.res, req->cqe.flags,
+						0, 0);
+	} else {
+		u64 extra1 = req->extra1;
+		u64 extra2 = req->extra2;
+
+		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+					req->cqe.res, req->cqe.flags, extra1, extra2);
+
+		/*
+		 * If we can't get a cq entry, userspace overflowed the
+		 * submission (by quite a lot). Increment the overflow count in
+		 * the ring.
+		 */
+		cqe = io_get_cqe(ctx);
+		if (likely(cqe)) {
+			memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
+			WRITE_ONCE(cqe->big_cqe[0], extra1);
+			WRITE_ONCE(cqe->big_cqe[1], extra2);
+			return true;
+		}
+
+		return io_cqring_event_overflow(ctx, req->cqe.user_data,
+						req->cqe.res, req->cqe.flags,
+						extra1, extra2);
 	}
-	return io_cqring_event_overflow(ctx, req->cqe.user_data,
-					req->cqe.res, req->cqe.flags, 0, 0);
 }
 
 static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
@@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 					    comp_list);
 
-		if (!(req->flags & REQ_F_CQE_SKIP)) {
-			if (!(ctx->flags & IORING_SETUP_CQE32))
-				__io_fill_cqe_req(ctx, req);
-			else
-				__io_fill_cqe32_req(ctx, req);
-		}
+		if (!(req->flags & REQ_F_CQE_SKIP))
+			__io_fill_cqe_req(ctx, req);
 	}
 
 	io_commit_cqring(ctx);
