@@ -724,8 +724,8 @@ static bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
 }
 
 static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
-					     u64 user_data, s32 res, u32 cflags,
-					     u64 extra1, u64 extra2, gfp_t gfp)
+					     struct io_cqe *cqe, u64 extra1,
+					     u64 extra2, gfp_t gfp)
 {
 	struct io_overflow_cqe *ocqe;
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
@@ -735,11 +735,11 @@ static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
 		ocq_size += sizeof(struct io_uring_cqe);
 
 	ocqe = kmalloc(ocq_size, gfp | __GFP_ACCOUNT);
-	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
+	trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe);
 	if (ocqe) {
-		ocqe->cqe.user_data = user_data;
-		ocqe->cqe.res = res;
-		ocqe->cqe.flags = cflags;
+		ocqe->cqe.user_data = cqe->user_data;
+		ocqe->cqe.res = cqe->res;
+		ocqe->cqe.flags = cqe->flags;
 		if (is_cqe32) {
 			ocqe->cqe.big_cqe[0] = extra1;
 			ocqe->cqe.big_cqe[1] = extra2;
@@ -806,6 +806,11 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return false;
 }
 
+static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
+{
+	return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
+}
+
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
 	bool filled;
@@ -814,8 +819,9 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
 	if (unlikely(!filled)) {
 		struct io_overflow_cqe *ocqe;
+		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_ATOMIC);
+		ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_ATOMIC);
 		filled = io_cqring_add_overflow(ctx, ocqe);
 	}
 	io_cq_unlock_post(ctx);
@@ -833,8 +839,9 @@ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 
 	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
 		struct io_overflow_cqe *ocqe;
+		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_KERNEL);
+		ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_KERNEL);
 		spin_lock(&ctx->completion_lock);
 		io_cqring_add_overflow(ctx, ocqe);
 		spin_unlock(&ctx->completion_lock);
@@ -1444,8 +1451,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 			gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
 			struct io_overflow_cqe *ocqe;
 
-			ocqe = io_alloc_ocqe(ctx, req->cqe.user_data, req->cqe.res,
-					     req->cqe.flags, req->big_cqe.extra1,
+			ocqe = io_alloc_ocqe(ctx, &req->cqe, req->big_cqe.extra1,
 					     req->big_cqe.extra2, gfp);
 			if (ctx->lockless_cq) {
 				spin_lock(&ctx->completion_lock);
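
Summary of the change above: io_alloc_ocqe() no longer takes user_data, res, and cflags as separate scalars. Callers either reuse an existing struct io_cqe (req->cqe in __io_submit_flush_completions()) or pack the three values into one on the stack with the new io_init_cqe() helper and pass its address. A minimal sketch of the new calling convention, based only on the hunks in this diff; the function name demo_post_overflow() is illustrative and not part of the patch, and the CQ/completion locking that the real callers hold around io_cqring_add_overflow() is omitted for brevity:

static bool demo_post_overflow(struct io_ring_ctx *ctx, u64 user_data,
			       s32 res, u32 cflags)
{
	struct io_overflow_cqe *ocqe;
	/* Pack the three scalars into a stack-allocated struct io_cqe. */
	struct io_cqe cqe = io_init_cqe(user_data, res, cflags);

	/* No 32-byte CQE payload here, so extra1/extra2 are 0. */
	ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_ATOMIC);
	return io_cqring_add_overflow(ctx, ocqe);
}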