Skip to content

Commit 10f466a

Browse files
committed
io_uring: split alloc and add of overflow
Add a new helper, io_alloc_ocqe(), that simply allocates and fills an overflow entry. Then it can get done outside of the locking section, and hence use more appropriate gfp_t allocation flags rather than always default to GFP_ATOMIC. Inspired by a previous series from Pavel: https://lore.kernel.org/io-uring/[email protected]/ Reviewed-by: Caleb Sander Mateos <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 5288b9e commit 10f466a

File tree

1 file changed

+45
-29
lines changed

1 file changed

+45
-29
lines changed

io_uring/io_uring.c

Lines changed: 45 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -697,20 +697,11 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
697697
}
698698
}
699699

700-
static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
701-
s32 res, u32 cflags, u64 extra1, u64 extra2)
700+
static bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
701+
struct io_overflow_cqe *ocqe)
702702
{
703-
struct io_overflow_cqe *ocqe;
704-
size_t ocq_size = sizeof(struct io_overflow_cqe);
705-
bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
706-
707703
lockdep_assert_held(&ctx->completion_lock);
708704

709-
if (is_cqe32)
710-
ocq_size += sizeof(struct io_uring_cqe);
711-
712-
ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
713-
trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
714705
if (!ocqe) {
715706
struct io_rings *r = ctx->rings;
716707

@@ -728,17 +719,35 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
728719
atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
729720

730721
}
731-
ocqe->cqe.user_data = user_data;
732-
ocqe->cqe.res = res;
733-
ocqe->cqe.flags = cflags;
734-
if (is_cqe32) {
735-
ocqe->cqe.big_cqe[0] = extra1;
736-
ocqe->cqe.big_cqe[1] = extra2;
737-
}
738722
list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
739723
return true;
740724
}
741725

726+
/*
 * Allocate and populate an overflow CQE describing the completion
 * (@user_data/@res/@cflags, plus @extra1/@extra2 on CQE32 rings) using the
 * caller-chosen @gfp allocation flags, so callers outside the completion
 * lock can avoid GFP_ATOMIC. Returns NULL on allocation failure; the
 * overflow trace event is emitted either way, recording the (possibly
 * NULL) entry pointer.
 */
static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
					     u64 user_data, s32 res, u32 cflags,
					     u64 extra1, u64 extra2, gfp_t gfp)
{
	bool cqe32 = (ctx->flags & IORING_SETUP_CQE32) != 0;
	size_t bytes = sizeof(struct io_overflow_cqe);
	struct io_overflow_cqe *entry;

	/* CQE32 rings carry one extra struct io_uring_cqe of payload */
	if (cqe32)
		bytes += sizeof(struct io_uring_cqe);

	/* __GFP_ACCOUNT: charge the allocation to the owning memcg */
	entry = kmalloc(bytes, gfp | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, entry);
	if (!entry)
		return NULL;

	entry->cqe.user_data = user_data;
	entry->cqe.res = res;
	entry->cqe.flags = cflags;
	if (cqe32) {
		entry->cqe.big_cqe[0] = extra1;
		entry->cqe.big_cqe[1] = extra2;
	}
	return entry;
}
742751
/*
743752
* writes to the cq entry need to come after reading head; the
744753
* control dependency is enough as we're using WRITE_ONCE to
@@ -803,8 +812,12 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
803812

804813
io_cq_lock(ctx);
805814
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
806-
if (!filled)
807-
filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
815+
if (unlikely(!filled)) {
816+
struct io_overflow_cqe *ocqe;
817+
818+
ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_ATOMIC);
819+
filled = io_cqring_add_overflow(ctx, ocqe);
820+
}
808821
io_cq_unlock_post(ctx);
809822
return filled;
810823
}
@@ -819,8 +832,11 @@ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
819832
lockdep_assert(ctx->lockless_cq);
820833

821834
if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
835+
struct io_overflow_cqe *ocqe;
836+
837+
ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_KERNEL);
822838
spin_lock(&ctx->completion_lock);
823-
io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
839+
io_cqring_add_overflow(ctx, ocqe);
824840
spin_unlock(&ctx->completion_lock);
825841
}
826842
ctx->submit_state.cq_flush = true;
@@ -1425,18 +1441,18 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
14251441
*/
14261442
if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
14271443
unlikely(!io_fill_cqe_req(ctx, req))) {
1444+
gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
1445+
struct io_overflow_cqe *ocqe;
1446+
1447+
ocqe = io_alloc_ocqe(ctx, req->cqe.user_data, req->cqe.res,
1448+
req->cqe.flags, req->big_cqe.extra1,
1449+
req->big_cqe.extra2, gfp);
14281450
if (ctx->lockless_cq) {
14291451
spin_lock(&ctx->completion_lock);
1430-
io_cqring_event_overflow(req->ctx, req->cqe.user_data,
1431-
req->cqe.res, req->cqe.flags,
1432-
req->big_cqe.extra1,
1433-
req->big_cqe.extra2);
1452+
io_cqring_add_overflow(ctx, ocqe);
14341453
spin_unlock(&ctx->completion_lock);
14351454
} else {
1436-
io_cqring_event_overflow(req->ctx, req->cqe.user_data,
1437-
req->cqe.res, req->cqe.flags,
1438-
req->big_cqe.extra1,
1439-
req->big_cqe.extra2);
1455+
io_cqring_add_overflow(ctx, ocqe);
14401456
}
14411457

14421458
memset(&req->big_cqe, 0, sizeof(req->big_cqe));

0 commit comments

Comments
 (0)