Commit f26cc95

isilence authored and axboe committed
io_uring: lockdep annotate CQ locking
Locking around CQE posting is complex and depends on options the ring
is created with, add more thorough lockdep annotations checking all
invariants.

Signed-off-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/aa3770b4eacae3915d782cc2ab2f395a99b4b232.1672795976.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <[email protected]>
1 parent 9ffa13f commit f26cc95

2 files changed, 17 insertions(+), 3 deletions(-)


io_uring/io_uring.c

Lines changed: 2 additions & 3 deletions
@@ -731,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
+	lockdep_assert_held(&ctx->completion_lock);
+
 	if (is_cqe32)
 		ocq_size += sizeof(struct io_uring_cqe);
 
@@ -820,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 {
 	struct io_uring_cqe *cqe;
 
-	if (!ctx->task_complete)
-		lockdep_assert_held(&ctx->completion_lock);
-
 	ctx->cq_extra++;
 
 	/*

io_uring/io_uring.h

Lines changed: 15 additions & 0 deletions
@@ -79,6 +79,19 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
 
+#define io_lockdep_assert_cq_locked(ctx)				\
+	do {								\
+		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
+			lockdep_assert_held(&ctx->uring_lock);		\
+		} else if (!ctx->task_complete) {			\
+			lockdep_assert_held(&ctx->completion_lock);	\
+		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
+			lockdep_assert(current_work());			\
+		} else {						\
+			lockdep_assert(current == ctx->submitter_task);	\
+		}							\
+	} while (0)
+
 static inline void io_req_task_work_add(struct io_kiocb *req)
 {
 	__io_req_task_work_add(req, true);
@@ -92,6 +105,8 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx);
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
+	io_lockdep_assert_cq_locked(ctx);
+
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
 		struct io_uring_cqe *cqe = ctx->cqe_cached;
 
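
For context, a minimal sketch of how a CQE-posting helper could lean on the new assertion; the function io_post_cqe_sketch() below is hypothetical and not part of this commit, while io_lockdep_assert_cq_locked(), io_get_cqe() and the WRITE_ONCE() stores follow the existing io_uring helpers shown above.

/* Hypothetical caller, for illustration only: a path about to write a
 * CQE asserts whichever lock matches this ring's setup instead of
 * unconditionally requiring ->completion_lock.
 */
static bool io_post_cqe_sketch(struct io_ring_ctx *ctx, u64 user_data,
			       s32 res, u32 cflags)
{
	struct io_uring_cqe *cqe;

	/* IOPOLL rings post under uring_lock; !task_complete rings under
	 * completion_lock; otherwise only the submitter task (or fallback
	 * work once that task is exiting) may post.
	 */
	io_lockdep_assert_cq_locked(ctx);

	cqe = io_get_cqe(ctx);	/* asserts again via io_get_cqe_overflow() */
	if (!cqe)
		return false;

	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, cflags);
	return true;
}

With the check centralised in io_get_cqe_overflow(), the narrower open-coded assertion in io_fill_cqe_aux() becomes redundant, which is why the io_uring.c hunk above drops it.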
