@@ -2464,8 +2464,8 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 	return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 }
 
-static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
-					    struct io_kiocb *req)
+static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
+				     struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 
@@ -2486,8 +2486,8 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
 				       req->cqe.res, req->cqe.flags, 0, 0);
 }
 
-static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
-					      struct io_kiocb *req)
+static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
+				       struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 	u64 extra1 = req->extra1;
@@ -2513,44 +2513,6 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
 				       req->cqe.flags, extra1, extra2);
 }
 
-static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
-	return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
-}
-
-static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
-				       u64 extra1, u64 extra2)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_cqe *cqe;
-
-	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
-		return;
-	if (req->flags & REQ_F_CQE_SKIP)
-		return;
-
-	trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
-				extra1, extra2);
-
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->cqe.user_data);
-		WRITE_ONCE(cqe->res, res);
-		WRITE_ONCE(cqe->flags, cflags);
-		WRITE_ONCE(cqe->big_cqe[0], extra1);
-		WRITE_ONCE(cqe->big_cqe[1], extra2);
-		return;
-	}
-
-	io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
-}
-
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
@@ -2593,16 +2555,24 @@ static void __io_req_complete_put(struct io_kiocb *req)
 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 				   u32 cflags)
 {
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req, res, cflags);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		req->cqe.res = res;
+		req->cqe.flags = cflags;
+		__io_fill_cqe_req(req->ctx, req);
+	}
 	__io_req_complete_put(req);
 }
 
 static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
 				     u32 cflags, u64 extra1, u64 extra2)
 {
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe32_req(req, res, cflags, extra1, extra2);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		req->cqe.res = res;
+		req->cqe.flags = cflags;
+		req->extra1 = extra1;
+		req->extra2 = extra2;
+		__io_fill_cqe32_req(req->ctx, req);
+	}
 	__io_req_complete_put(req);
 }
 
@@ -3207,9 +3177,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 
 		if (!(req->flags & REQ_F_CQE_SKIP)) {
 			if (!(ctx->flags & IORING_SETUP_CQE32))
-				__io_fill_cqe_req_filled(ctx, req);
+				__io_fill_cqe_req(ctx, req);
 			else
-				__io_fill_cqe32_req_filled(ctx, req);
+				__io_fill_cqe32_req(ctx, req);
 		}
 	}
 
@@ -3329,7 +3299,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		nr_events++;
 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
 			continue;
-		__io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
+
+		req->cqe.flags = io_put_kbuf(req, 0);
+		__io_fill_cqe_req(req->ctx, req);
 	}
 
 	if (unlikely(!nr_events))
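The hunks above converge on a single calling convention: the caller stages the completion result on the request itself (cqe.res, cqe.flags, plus extra1/extra2 for 32-byte CQEs), and the fill helpers then take only the ring context and the request. Below is a minimal, self-contained sketch of that pattern; the mock_* types and helper are stand-ins invented for illustration, not the kernel's definitions.

/*
 * Sketch of the "stage the result on the request, then fill" convention.
 * Mock types only; not the kernel's struct io_kiocb / io_uring_cqe.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_cqe {
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
};

struct mock_req {
	struct mock_cqe cqe;
	uint64_t extra1, extra2;	/* used only for 32-byte CQEs */
};

/* Stand-in for __io_fill_cqe_req(ctx, req): reads everything from req. */
static bool mock_fill_cqe_req(const struct mock_req *req)
{
	printf("cqe: user_data=%llu res=%d flags=%u\n",
	       (unsigned long long)req->cqe.user_data,
	       (int)req->cqe.res, (unsigned)req->cqe.flags);
	return true;
}

int main(void)
{
	struct mock_req req = { .cqe = { .user_data = 42 } };

	/* Caller stages the result first, as __io_req_complete_post() now does. */
	req.cqe.res = 0;
	req.cqe.flags = 0;
	mock_fill_cqe_req(&req);
	return 0;
}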