@@ -489,7 +489,7 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 }
 
 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
-				 long res, unsigned ev_flags)
+				 long res)
 {
 	struct io_uring_cqe *cqe;
 
@@ -502,7 +502,7 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
 	if (cqe) {
 		WRITE_ONCE(cqe->user_data, ki_user_data);
 		WRITE_ONCE(cqe->res, res);
-		WRITE_ONCE(cqe->flags, ev_flags);
+		WRITE_ONCE(cqe->flags, 0);
 	} else {
 		unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
 
@@ -521,12 +521,12 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 }
 
 static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
-				long res, unsigned ev_flags)
+				long res)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	io_cqring_fill_event(ctx, user_data, res, ev_flags);
+	io_cqring_fill_event(ctx, user_data, res);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
@@ -628,7 +628,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		req = list_first_entry(done, struct io_kiocb, list);
 		list_del(&req->list);
 
-		io_cqring_fill_event(ctx, req->user_data, req->error, 0);
+		io_cqring_fill_event(ctx, req->user_data, req->error);
 		(*nr_events)++;
 
 		if (refcount_dec_and_test(&req->refs)) {
@@ -776,7 +776,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 
 	kiocb_end_write(kiocb);
 
-	io_cqring_add_event(req->ctx, req->user_data, res, 0);
+	io_cqring_add_event(req->ctx, req->user_data, res);
 	io_put_req(req);
 }
 
@@ -1211,7 +1211,7 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 
-	io_cqring_add_event(ctx, user_data, err, 0);
+	io_cqring_add_event(ctx, user_data, err);
 	io_put_req(req);
 	return 0;
 }
@@ -1256,7 +1256,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 				end > 0 ? end : LLONG_MAX,
 				fsync_flags & IORING_FSYNC_DATASYNC);
 
-	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
+	io_cqring_add_event(req->ctx, sqe->user_data, ret);
 	io_put_req(req);
 	return 0;
 }
@@ -1300,7 +1300,7 @@ static int io_sync_file_range(struct io_kiocb *req,
 
 	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
 
-	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
+	io_cqring_add_event(req->ctx, sqe->user_data, ret);
 	io_put_req(req);
 	return 0;
 }
@@ -1358,7 +1358,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
-	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
+	io_cqring_add_event(req->ctx, sqe->user_data, ret);
 	io_put_req(req);
 	return 0;
 }
@@ -1367,7 +1367,7 @@ static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			     __poll_t mask)
 {
 	req->poll.done = true;
-	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
+	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
 	io_commit_cqring(ctx);
 }
 
@@ -1687,7 +1687,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 		io_put_req(req);
 
 		if (ret) {
-			io_cqring_add_event(ctx, sqe->user_data, ret, 0);
+			io_cqring_add_event(ctx, sqe->user_data, ret);
 			io_put_req(req);
 		}
 
@@ -1992,7 +1992,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
 			continue;
 		}
 
-		io_cqring_add_event(ctx, sqes[i].sqe->user_data, ret, 0);
+		io_cqring_add_event(ctx, sqes[i].sqe->user_data, ret);
 	}
 
 	if (statep)
@@ -2157,7 +2157,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 
 		ret = io_submit_sqe(ctx, &s, statep);
 		if (ret)
-			io_cqring_add_event(ctx, s.sqe->user_data, ret, 0);
+			io_cqring_add_event(ctx, s.sqe->user_data, ret);
 	}
 	io_commit_sqring(ctx);
 
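
Note: the net effect for anyone consuming the ring is that every completion is now posted with cqe->flags equal to 0, since no event flags are defined yet. A minimal standalone sketch of the resulting pattern, using userspace re-declarations for illustration only (these are not the kernel's definitions):

/* sketch.c - illustrates the post-patch completion-fill shape:
 * the helper no longer takes an ev_flags argument and hardcodes
 * cqe->flags to 0. */
#include <stdint.h>
#include <stdio.h>

struct io_uring_cqe {
	uint64_t user_data;	/* value passed through from the sqe */
	int32_t  res;		/* operation result code */
	uint32_t flags;		/* no flag bits defined yet; always 0 */
};

/* Callers can no longer influence cqe->flags; the helper zeroes it
 * until real flag bits exist. */
static void fill_event(struct io_uring_cqe *cqe, uint64_t user_data, long res)
{
	cqe->user_data = user_data;
	cqe->res = (int32_t)res;
	cqe->flags = 0;
}

int main(void)
{
	struct io_uring_cqe cqe;

	fill_event(&cqe, 0xdeadbeefULL, 0);
	printf("user_data=%llx res=%d flags=%u\n",
	       (unsigned long long)cqe.user_data, cqe.res,
	       (unsigned)cqe.flags);
	return 0;
}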