Commit f33096a

io_uring: add io_add_aux_cqe() helper
This helper will post a CQE, and can be called from task_work where we know that the ctx is already properly locked and that deferred completions will get flushed later on.

Signed-off-by: Jens Axboe <[email protected]>
Parent: c3ac76f

File tree: io_uring/io_uring.c, io_uring/io_uring.h

2 files changed: +22 -2


io_uring/io_uring.c (21 additions, 2 deletions)
@@ -801,19 +801,38 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return false;
 }
 
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
+			      u32 cflags)
 {
 	bool filled;
 
-	io_cq_lock(ctx);
 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
 	if (!filled)
 		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 
+	return filled;
+}
+
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+	bool filled;
+
+	io_cq_lock(ctx);
+	filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
 	io_cq_unlock_post(ctx);
 	return filled;
 }
 
+/*
+ * Must be called from inline task_work so we know a flush will happen later,
+ * and obviously with ctx->uring_lock held (tw always has that).
+ */
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+	__io_post_aux_cqe(ctx, user_data, res, cflags);
+	ctx->submit_state.cq_flush = true;
+}
+
 /*
  * A helper for multishot requests posting additional CQEs.
  * Should only be used from a task_work including IO_URING_F_MULTISHOT.

io_uring/io_uring.h (1 addition, 0 deletions)
@@ -65,6 +65,7 @@ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
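
To illustrate the intended calling convention, a minimal sketch of a task_work callback using the new helper follows. This is not part of the commit: my_tw_handler and the zeroed res/cflags values are made up for the example; only io_add_aux_cqe(), io_post_aux_cqe(), and the task_work locking contract come from the patch itself.

/*
 * Illustrative sketch only, not part of this commit. Inline task_work
 * runs with ctx->uring_lock held and is followed by a deferred
 * completion flush, which is exactly the contract io_add_aux_cqe()
 * relies on.
 */
static void my_tw_handler(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* Before this commit: takes and releases the CQ lock per CQE */
	/* io_post_aux_cqe(ctx, req->cqe.user_data, 0, 0); */

	/*
	 * With this commit: no locking here; setting
	 * ctx->submit_state.cq_flush defers the ring flush until the
	 * task_work batch completes.
	 */
	io_add_aux_cqe(ctx, req->cqe.user_data, 0, 0);
}

Splitting out __io_post_aux_cqe() lets the self-locking path (io_post_aux_cqe()) and the already-locked task_work path (io_add_aux_cqe()) share the fill-or-overflow logic, while the latter batches the flush instead of locking per CQE.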
