@@ -668,26 +668,7 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
 	io_commit_cqring_flush(ctx);
 }
 
-static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
-{
-	struct io_overflow_cqe *ocqe;
-	LIST_HEAD(list);
-
-	lockdep_assert_held(&ctx->uring_lock);
-
-	spin_lock(&ctx->completion_lock);
-	list_splice_init(&ctx->cq_overflow_list, &list);
-	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
-	spin_unlock(&ctx->completion_lock);
-
-	while (!list_empty(&list)) {
-		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
-		list_del(&ocqe->list);
-		kfree(ocqe);
-	}
-}
-
-static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
 {
 	size_t cqe_size = sizeof(struct io_uring_cqe);
 
@@ -704,11 +685,14 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 		struct io_uring_cqe *cqe;
 		struct io_overflow_cqe *ocqe;
 
-		if (!io_get_cqe_overflow(ctx, &cqe, true))
-			break;
 		ocqe = list_first_entry(&ctx->cq_overflow_list,
 					struct io_overflow_cqe, list);
-		memcpy(cqe, &ocqe->cqe, cqe_size);
+
+		if (!dying) {
+			if (!io_get_cqe_overflow(ctx, &cqe, true))
+				break;
+			memcpy(cqe, &ocqe->cqe, cqe_size);
+		}
 		list_del(&ocqe->list);
 		kfree(ocqe);
 	}
@@ -720,10 +704,16 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	io_cq_unlock_post(ctx);
 }
 
+static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
+{
+	if (ctx->rings)
+		__io_cqring_overflow_flush(ctx, true);
+}
+
 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
-	__io_cqring_overflow_flush(ctx);
+	__io_cqring_overflow_flush(ctx, false);
 	mutex_unlock(&ctx->uring_lock);
 }
 
@@ -1531,7 +1521,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	check_cq = READ_ONCE(ctx->check_cq);
 	if (unlikely(check_cq)) {
 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-			__io_cqring_overflow_flush(ctx);
+			__io_cqring_overflow_flush(ctx, false);
 		/*
 		 * Similarly do not spin if we have not informed the user of any
 		 * dropped CQE.
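
Taken together, the hunks above fold the old open-coded io_cqring_overflow_kill() into the common flush loop by way of a new "dying" flag. The following is a minimal sketch of the resulting shape, not a drop-in copy of io_uring.c: the CQE32 sizing, the lockdep/early-exit checks and the clearing of IO_CHECK_CQ_OVERFLOW_BIT are elided, and helpers such as io_cq_lock(), io_cq_unlock_post() and io_get_cqe_overflow() are assumed from the surrounding file.

/* Sketch of the post-patch control flow (simplified). */
static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
{
	size_t cqe_size = sizeof(struct io_uring_cqe);

	/* ... CQE32 sizing and early-exit checks elided ... */
	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe;
		struct io_overflow_cqe *ocqe;

		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);

		/*
		 * On the normal flush path the overflowed entry is copied
		 * into the CQ ring; when the ring is dying it is only freed,
		 * so a full CQ ring can no longer stall the teardown.
		 */
		if (!dying) {
			if (!io_get_cqe_overflow(ctx, &cqe, true))
				break;
			memcpy(cqe, &ocqe->cqe, cqe_size);
		}
		list_del(&ocqe->list);
		kfree(ocqe);
	}
	/* ... overflow bit cleared once the list is drained ... */
	io_cq_unlock_post(ctx);
}

/* Teardown now reuses the same loop instead of open-coding its own. */
static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true);
}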