@@ -2469,21 +2469,48 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 {
 	struct io_uring_cqe *cqe;
 
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags, 0, 0);
+	if (!(ctx->flags & IORING_SETUP_CQE32)) {
+		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+					req->cqe.res, req->cqe.flags, 0, 0);
 
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
-		memcpy(cqe, &req->cqe, sizeof(*cqe));
-		return true;
+		/*
+		 * If we can't get a cq entry, userspace overflowed the
+		 * submission (by quite a lot). Increment the overflow count in
+		 * the ring.
+		 */
+		cqe = io_get_cqe(ctx);
+		if (likely(cqe)) {
+			memcpy(cqe, &req->cqe, sizeof(*cqe));
+			return true;
+		}
+
+		return io_cqring_event_overflow(ctx, req->cqe.user_data,
+						req->cqe.res, req->cqe.flags,
+						0, 0);
+	} else {
+		u64 extra1 = req->extra1;
+		u64 extra2 = req->extra2;
+
+		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+					req->cqe.res, req->cqe.flags, extra1, extra2);
+
+		/*
+		 * If we can't get a cq entry, userspace overflowed the
+		 * submission (by quite a lot). Increment the overflow count in
+		 * the ring.
+		 */
+		cqe = io_get_cqe(ctx);
+		if (likely(cqe)) {
+			memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
+			WRITE_ONCE(cqe->big_cqe[0], extra1);
+			WRITE_ONCE(cqe->big_cqe[1], extra2);
+			return true;
+		}
+
+		return io_cqring_event_overflow(ctx, req->cqe.user_data,
+						req->cqe.res, req->cqe.flags,
+						extra1, extra2);
 	}
-	return io_cqring_event_overflow(ctx, req->cqe.user_data,
-					req->cqe.res, req->cqe.flags, 0, 0);
 }
 
 static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
@@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 					    comp_list);
 
-		if (!(req->flags & REQ_F_CQE_SKIP)) {
-			if (!(ctx->flags & IORING_SETUP_CQE32))
-				__io_fill_cqe_req(ctx, req);
-			else
-				__io_fill_cqe32_req(ctx, req);
-		}
+		if (!(req->flags & REQ_F_CQE_SKIP))
+			__io_fill_cqe_req(ctx, req);
 	}
 
 	io_commit_cqring(ctx);
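For context, not part of the patch: on a ring created with IORING_SETUP_CQE32, every CQE is 32 bytes, and the two trailing big_cqe[] words carry the extra1/extra2 values the new branch writes above. Below is a minimal userspace sketch of reading those fields via liburing; the setup and wait calls are standard liburing API, but the choice of opcode that would actually populate big_cqe is left out and assumed.

/* Sketch only: assumes a liburing and kernel with IORING_SETUP_CQE32 support. */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;

	/* Ask for 32-byte CQEs; this fails on kernels without CQE32. */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_CQE32) < 0)
		return 1;

	/* ... prepare and submit an SQE for an opcode that fills big_cqe ... */

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* big_cqe[] is only present on CQE32 rings. */
		printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
		       (unsigned long long)cqe->big_cqe[0],
		       (unsigned long long)cqe->big_cqe[1]);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

The second hunk is the payoff of folding both CQE sizes into __io_fill_cqe_req(): __io_submit_flush_completions() no longer branches on IORING_SETUP_CQE32 per request, and the separate __io_fill_cqe32_req() call site goes away.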