Skip to content

Commit 29ede20

Browse files
isilence authored and axboe committed
io_uring: fill extra big cqe fields from req
The only user of io_req_complete32()-like functions is cmd requests. Instead of keeping the whole complete32 family, remove them and provide the extras in already added for inline completions req->extra{1,2}. When fill_cqe_res() finds CQE32 option enabled it'll use those fields to fill a 32B cqe. Signed-off-by: Pavel Begunkov <[email protected]> Link: https://lore.kernel.org/r/af1319eb661b1f9a0abceb51cbbf72b8002e019d.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <[email protected]>
1 parent f43de1f commit 29ede20

File tree

1 file changed

+10
-68
lines changed

1 file changed

+10
-68
lines changed

fs/io_uring.c

Lines changed: 10 additions & 68 deletions
Original file line number · Diff line number · Diff line change
@@ -2513,33 +2513,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
25132513
}
25142514
}
25152515

2516-
static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
2517-
struct io_kiocb *req)
2518-
{
2519-
struct io_uring_cqe *cqe;
2520-
u64 extra1 = req->extra1;
2521-
u64 extra2 = req->extra2;
2522-
2523-
trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
2524-
req->cqe.res, req->cqe.flags, extra1, extra2);
2525-
2526-
/*
2527-
* If we can't get a cq entry, userspace overflowed the
2528-
* submission (by quite a lot). Increment the overflow count in
2529-
* the ring.
2530-
*/
2531-
cqe = io_get_cqe(ctx);
2532-
if (likely(cqe)) {
2533-
memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
2534-
cqe->big_cqe[0] = extra1;
2535-
cqe->big_cqe[1] = extra2;
2536-
return true;
2537-
}
2538-
2539-
return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
2540-
req->cqe.flags, extra1, extra2);
2541-
}
2542-
25432516
static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
25442517
s32 res, u32 cflags)
25452518
{
@@ -2590,19 +2563,6 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
25902563
__io_req_complete_put(req);
25912564
}
25922565

2593-
static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
2594-
u32 cflags, u64 extra1, u64 extra2)
2595-
{
2596-
if (!(req->flags & REQ_F_CQE_SKIP)) {
2597-
req->cqe.res = res;
2598-
req->cqe.flags = cflags;
2599-
req->extra1 = extra1;
2600-
req->extra2 = extra2;
2601-
__io_fill_cqe32_req(req->ctx, req);
2602-
}
2603-
__io_req_complete_put(req);
2604-
}
2605-
26062566
static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
26072567
{
26082568
struct io_ring_ctx *ctx = req->ctx;
@@ -2614,18 +2574,6 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
26142574
io_cqring_ev_posted(ctx);
26152575
}
26162576

2617-
static void io_req_complete_post32(struct io_kiocb *req, s32 res,
2618-
u32 cflags, u64 extra1, u64 extra2)
2619-
{
2620-
struct io_ring_ctx *ctx = req->ctx;
2621-
2622-
spin_lock(&ctx->completion_lock);
2623-
__io_req_complete_post32(req, res, cflags, extra1, extra2);
2624-
io_commit_cqring(ctx);
2625-
spin_unlock(&ctx->completion_lock);
2626-
io_cqring_ev_posted(ctx);
2627-
}
2628-
26292577
static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
26302578
u32 cflags)
26312579
{
@@ -2643,19 +2591,6 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
26432591
io_req_complete_post(req, res, cflags);
26442592
}
26452593

2646-
static inline void __io_req_complete32(struct io_kiocb *req,
2647-
unsigned int issue_flags, s32 res,
2648-
u32 cflags, u64 extra1, u64 extra2)
2649-
{
2650-
if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
2651-
io_req_complete_state(req, res, cflags);
2652-
req->extra1 = extra1;
2653-
req->extra2 = extra2;
2654-
} else {
2655-
io_req_complete_post32(req, res, cflags, extra1, extra2);
2656-
}
2657-
}
2658-
26592594
static inline void io_req_complete(struct io_kiocb *req, s32 res)
26602595
{
26612596
if (res < 0)
@@ -5079,6 +5014,13 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
50795014
}
50805015
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
50815016

5017+
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
5018+
u64 extra1, u64 extra2)
5019+
{
5020+
req->extra1 = extra1;
5021+
req->extra2 = extra2;
5022+
}
5023+
50825024
/*
50835025
* Called by consumers of io_uring_cmd, if they originally returned
50845026
* -EIOCBQUEUED upon receiving the command.
@@ -5089,10 +5031,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
50895031

50905032
if (ret < 0)
50915033
req_set_fail(req);
5034+
50925035
if (req->ctx->flags & IORING_SETUP_CQE32)
5093-
__io_req_complete32(req, 0, ret, 0, res2, 0);
5094-
else
5095-
io_req_complete(req, ret);
5036+
io_req_set_cqe32_extra(req, res2, 0);
5037+
io_req_complete(req, ret);
50965038
}
50975039
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
50985040

0 commit comments

Comments
 (0)