Skip to content

Commit 072d37b

Browse files
committed
io_uring: make io_alloc_ocqe() take a struct io_cqe pointer
The number of arguments to io_alloc_ocqe() is a bit unwieldy. Make it take a struct io_cqe pointer rather than three separate CQE args. One path already has that readily available; add an io_init_cqe() helper for the remaining two.

Reviewed-by: Caleb Sander Mateos <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 10f466a commit 072d37b

File tree

1 file changed

+16
-10
lines changed

1 file changed

+16
-10
lines changed

io_uring/io_uring.c

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -724,8 +724,8 @@ static bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
724724
}
725725

726726
static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
727-
u64 user_data, s32 res, u32 cflags,
728-
u64 extra1, u64 extra2, gfp_t gfp)
727+
struct io_cqe *cqe, u64 extra1,
728+
u64 extra2, gfp_t gfp)
729729
{
730730
struct io_overflow_cqe *ocqe;
731731
size_t ocq_size = sizeof(struct io_overflow_cqe);
@@ -735,11 +735,11 @@ static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
735735
ocq_size += sizeof(struct io_uring_cqe);
736736

737737
ocqe = kmalloc(ocq_size, gfp | __GFP_ACCOUNT);
738-
trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
738+
trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe);
739739
if (ocqe) {
740-
ocqe->cqe.user_data = user_data;
741-
ocqe->cqe.res = res;
742-
ocqe->cqe.flags = cflags;
740+
ocqe->cqe.user_data = cqe->user_data;
741+
ocqe->cqe.res = cqe->res;
742+
ocqe->cqe.flags = cqe->flags;
743743
if (is_cqe32) {
744744
ocqe->cqe.big_cqe[0] = extra1;
745745
ocqe->cqe.big_cqe[1] = extra2;
@@ -806,6 +806,11 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
806806
return false;
807807
}
808808

809+
static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
810+
{
811+
return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
812+
}
813+
809814
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
810815
{
811816
bool filled;
@@ -814,8 +819,9 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
814819
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
815820
if (unlikely(!filled)) {
816821
struct io_overflow_cqe *ocqe;
822+
struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
817823

818-
ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_ATOMIC);
824+
ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_ATOMIC);
819825
filled = io_cqring_add_overflow(ctx, ocqe);
820826
}
821827
io_cq_unlock_post(ctx);
@@ -833,8 +839,9 @@ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
833839

834840
if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
835841
struct io_overflow_cqe *ocqe;
842+
struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
836843

837-
ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_KERNEL);
844+
ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_KERNEL);
838845
spin_lock(&ctx->completion_lock);
839846
io_cqring_add_overflow(ctx, ocqe);
840847
spin_unlock(&ctx->completion_lock);
@@ -1444,8 +1451,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
14441451
gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
14451452
struct io_overflow_cqe *ocqe;
14461453

1447-
ocqe = io_alloc_ocqe(ctx, req->cqe.user_data, req->cqe.res,
1448-
req->cqe.flags, req->big_cqe.extra1,
1454+
ocqe = io_alloc_ocqe(ctx, &req->cqe, req->big_cqe.extra1,
14491455
req->big_cqe.extra2, gfp);
14501456
if (ctx->lockless_cq) {
14511457
spin_lock(&ctx->completion_lock);

0 commit comments

Comments (0)