@@ -739,14 +739,6 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	return true;
 }
 
-static void io_req_cqe_overflow(struct io_kiocb *req)
-{
-	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
-				 req->cqe.res, req->cqe.flags,
-				 req->big_cqe.extra1, req->big_cqe.extra2);
-	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
-}
-
 /*
  * writes to the cq entry need to come after reading head; the
  * control dependency is enough as we're using WRITE_ONCE to
@@ -1435,11 +1427,19 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		    unlikely(!io_fill_cqe_req(ctx, req))) {
 			if (ctx->lockless_cq) {
 				spin_lock(&ctx->completion_lock);
-				io_req_cqe_overflow(req);
+				io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+							 req->cqe.res, req->cqe.flags,
+							 req->big_cqe.extra1,
+							 req->big_cqe.extra2);
 				spin_unlock(&ctx->completion_lock);
 			} else {
-				io_req_cqe_overflow(req);
+				io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+							 req->cqe.res, req->cqe.flags,
+							 req->big_cqe.extra1,
+							 req->big_cqe.extra2);
 			}
+
+			memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 		}
 	}
 	__io_cq_unlock_post(ctx);
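
For reference, a minimal standalone sketch of the control-flow change above: the io_req_cqe_overflow() helper is removed and io_cqring_event_overflow() is open coded in both branches of __io_submit_flush_completions(), while the clearing of req->big_cqe that the helper used to do is hoisted so it runs once after either branch. The types and the overflow function below are simplified stand-ins for illustration only, not the io_uring definitions, and locking is reduced to comments.

/* Simplified, self-contained model of the refactor: the overflow call is
 * duplicated in both branches and the big_cqe memset runs once afterwards.
 * All names here are stand-ins modeled on the diff, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct big_cqe { unsigned long long extra1, extra2; };

struct req {
	unsigned long long user_data;
	int res;
	unsigned int flags;
	struct big_cqe big_cqe;
};

/* Stand-in for io_cqring_event_overflow(): just records the values. */
static void event_overflow(unsigned long long user_data, int res,
			   unsigned int flags, unsigned long long extra1,
			   unsigned long long extra2)
{
	printf("overflow: data=%llu res=%d flags=%u extra=[%llu %llu]\n",
	       user_data, res, flags, extra1, extra2);
}

static void flush_one(struct req *req, bool lockless_cq)
{
	if (lockless_cq) {
		/* completion_lock would be taken here in the real code */
		event_overflow(req->user_data, req->res, req->flags,
			       req->big_cqe.extra1, req->big_cqe.extra2);
		/* ... and released here */
	} else {
		event_overflow(req->user_data, req->res, req->flags,
			       req->big_cqe.extra1, req->big_cqe.extra2);
	}

	/* Cleanup that previously lived inside the removed helper now
	 * runs once, after either branch. */
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

int main(void)
{
	struct req r = { .user_data = 1, .res = 0, .flags = 0,
			 .big_cqe = { 2, 3 } };

	flush_one(&r, true);
	return 0;
}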