Skip to content

Commit 6b23124

Browse files
isilence authored and axboe committed
io_uring: consolidate overflow flushing
Consolidate __io_cqring_overflow_flush and io_cqring_overflow_kill() into a single function as it once was, it's easier to work with it this way. Signed-off-by: Pavel Begunkov <[email protected]> Link: https://lore.kernel.org/r/986b42c35e76a6be7aa0cdcda0a236a2222da3a7.1712708261.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <[email protected]>
1 parent 8d09a88 commit 6b23124

File tree

1 file changed

+15
-25
lines changed

1 file changed

+15
-25
lines changed

io_uring/io_uring.c

Lines changed: 15 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -668,26 +668,7 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
668668
io_commit_cqring_flush(ctx);
669669
}
670670

671-
static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
672-
{
673-
struct io_overflow_cqe *ocqe;
674-
LIST_HEAD(list);
675-
676-
lockdep_assert_held(&ctx->uring_lock);
677-
678-
spin_lock(&ctx->completion_lock);
679-
list_splice_init(&ctx->cq_overflow_list, &list);
680-
clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
681-
spin_unlock(&ctx->completion_lock);
682-
683-
while (!list_empty(&list)) {
684-
ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
685-
list_del(&ocqe->list);
686-
kfree(ocqe);
687-
}
688-
}
689-
690-
static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
671+
static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
691672
{
692673
size_t cqe_size = sizeof(struct io_uring_cqe);
693674

@@ -704,11 +685,14 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
704685
struct io_uring_cqe *cqe;
705686
struct io_overflow_cqe *ocqe;
706687

707-
if (!io_get_cqe_overflow(ctx, &cqe, true))
708-
break;
709688
ocqe = list_first_entry(&ctx->cq_overflow_list,
710689
struct io_overflow_cqe, list);
711-
memcpy(cqe, &ocqe->cqe, cqe_size);
690+
691+
if (!dying) {
692+
if (!io_get_cqe_overflow(ctx, &cqe, true))
693+
break;
694+
memcpy(cqe, &ocqe->cqe, cqe_size);
695+
}
712696
list_del(&ocqe->list);
713697
kfree(ocqe);
714698
}
@@ -720,10 +704,16 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
720704
io_cq_unlock_post(ctx);
721705
}
722706

707+
static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
708+
{
709+
if (ctx->rings)
710+
__io_cqring_overflow_flush(ctx, true);
711+
}
712+
723713
static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
724714
{
725715
mutex_lock(&ctx->uring_lock);
726-
__io_cqring_overflow_flush(ctx);
716+
__io_cqring_overflow_flush(ctx, false);
727717
mutex_unlock(&ctx->uring_lock);
728718
}
729719

@@ -1531,7 +1521,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
15311521
check_cq = READ_ONCE(ctx->check_cq);
15321522
if (unlikely(check_cq)) {
15331523
if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1534-
__io_cqring_overflow_flush(ctx);
1524+
__io_cqring_overflow_flush(ctx, false);
15351525
/*
15361526
* Similarly do not spin if we have not informed the user of any
15371527
* dropped CQE.

0 commit comments

Comments (0)