@@ -202,7 +202,7 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 	 * mean that the underlying data can be gone at any time. But that
 	 * should be fixed seperately, and then this check could be killed.
 	 */
-	if (!(req->flags & REQ_F_REFCOUNT)) {
+	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
 		req->flags &= ~REQ_F_NEED_CLEANUP;
 		io_rw_recycle(req, issue_flags);
 	}
@@ -455,19 +455,12 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 	return NULL;
 }
 
-#ifdef CONFIG_BLOCK
-static void io_resubmit_prep(struct io_kiocb *req)
-{
-	struct io_async_rw *io = req->async_data;
-	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-	io_meta_restore(io, &rw->kiocb);
-	iov_iter_restore(&io->iter, &io->iter_state);
-}
-
 static bool io_rw_should_reissue(struct io_kiocb *req)
 {
+#ifdef CONFIG_BLOCK
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	umode_t mode = file_inode(req->file)->i_mode;
+	struct io_async_rw *io = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!S_ISBLK(mode) && !S_ISREG(mode))
@@ -488,17 +481,14 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 	 */
 	if (!same_thread_group(req->tctx->task, current) || !in_task())
 		return false;
+
+	io_meta_restore(io, &rw->kiocb);
+	iov_iter_restore(&io->iter, &io->iter_state);
 	return true;
-}
 #else
-static void io_resubmit_prep(struct io_kiocb *req)
-{
-}
-static bool io_rw_should_reissue(struct io_kiocb *req)
-{
 	return false;
-}
 #endif
+}
 
 static void io_req_end_write(struct io_kiocb *req)
 {
@@ -525,22 +515,16 @@ static void io_req_io_end(struct io_kiocb *req)
 	}
 }
 
-static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+static void __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-	if (unlikely(res != req->cqe.res)) {
-		if (res == -EAGAIN && io_rw_should_reissue(req)) {
-			/*
-			 * Reissue will start accounting again, finish the
-			 * current cycle.
-			 */
-			io_req_io_end(req);
-			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
-			return true;
-		}
+	if (res == req->cqe.res)
+		return;
+	if (res == -EAGAIN && io_rw_should_reissue(req)) {
+		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+	} else {
 		req_set_fail(req);
 		req->cqe.res = res;
 	}
-	return false;
 }
 
 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
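Note on the hunk above: __io_complete_rw_common() goes from returning bool to returning void. Rather than telling each caller whether a reissue was started, it now just records REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE in the request flags and lets the completion path act on that later. A minimal, self-contained sketch of that shape, using hypothetical toy_* names rather than the kernel's types:

/*
 * Illustrative sketch only: hypothetical toy_* names, not io_uring's API.
 * The helper is void and records "reissue" in a flags word instead of
 * returning a bool that every caller has to branch on.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_EAGAIN    (-11)
#define TOY_F_REISSUE (1u << 0)

struct toy_req {
	unsigned int flags;
	long expected;	/* result recorded at submit time (cqe.res analogue) */
	long res;	/* final result reported to the caller */
};

/* Analogue of io_rw_should_reissue(): decide whether an -EAGAIN completion
 * may be retried; any saved iterator state would be restored here. */
static bool toy_should_reissue(struct toy_req *req)
{
	(void)req;
	return true;
}

/* After the change: void, nothing for callers to branch on. */
static void toy_complete_common(struct toy_req *req, long res)
{
	if (res == req->expected)
		return;
	if (res == TOY_EAGAIN && toy_should_reissue(req))
		req->flags |= TOY_F_REISSUE;	/* retried later, at completion time */
	else
		req->res = res;			/* record the failure */
}

int main(void)
{
	struct toy_req req = { .flags = 0, .expected = 4096, .res = 0 };

	toy_complete_common(&req, TOY_EAGAIN);
	printf("reissue flagged: %s\n", (req.flags & TOY_F_REISSUE) ? "yes" : "no");
	return 0;
}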
@@ -583,8 +567,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 
 	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
-		if (__io_complete_rw_common(req, res))
-			return;
+		__io_complete_rw_common(req, res);
 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 	}
 	req->io_task_work.func = io_req_rw_complete;
@@ -646,26 +629,19 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = rw->kiocb.ki_pos;
 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
-		if (!__io_complete_rw_common(req, ret)) {
-			/*
-			 * Safe to call io_end from here as we're inline
-			 * from the submission path.
-			 */
-			io_req_io_end(req);
-			io_req_set_res(req, final_ret,
-				       io_put_kbuf(req, ret, issue_flags));
-			io_req_rw_cleanup(req, issue_flags);
-			return IOU_OK;
-		}
+		__io_complete_rw_common(req, ret);
+		/*
+		 * Safe to call io_end from here as we're inline
+		 * from the submission path.
+		 */
+		io_req_io_end(req);
+		io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
+		io_req_rw_cleanup(req, issue_flags);
+		return IOU_OK;
 	} else {
 		io_rw_done(&rw->kiocb, ret);
 	}
 
-	if (req->flags & REQ_F_REISSUE) {
-		req->flags &= ~REQ_F_REISSUE;
-		io_resubmit_prep(req);
-		return -EAGAIN;
-	}
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
 
@@ -944,8 +920,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == -EOPNOTSUPP && force_nonblock)
 		ret = -EAGAIN;
 
-	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
-		req->flags &= ~REQ_F_REISSUE;
+	if (ret == -EAGAIN) {
 		/* If we can poll, just do that. */
 		if (io_file_can_poll(req))
 			return -EAGAIN;
@@ -1154,11 +1129,6 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	else
 		ret2 = -EINVAL;
 
-	if (req->flags & REQ_F_REISSUE) {
-		req->flags &= ~REQ_F_REISSUE;
-		ret2 = -EAGAIN;
-	}
-
 	/*
 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
 	 * retry them without IOCB_NOWAIT.
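Taken together, these hunks drop the inline reissue handling: io_resubmit_prep() is removed, the iterator and metadata restore moves into io_rw_should_reissue(), and kiocb_done(), __io_read() and io_write() no longer check or clear REQ_F_REISSUE themselves. The flag set in __io_complete_rw_common() is presumably consumed later in the completion path; that consumer is not part of this excerpt. A self-contained sketch of the deferred-retry pattern, again with hypothetical names:

/*
 * Illustrative sketch only: hypothetical toy_* names, not io_uring's API.
 * The submit path never retries inline; the completion runner is the one
 * place that consumes the reissue flag and tries again.
 */
#include <stdio.h>

#define TOY_EAGAIN    (-11)
#define TOY_F_REISSUE (1u << 0)

struct toy_req {
	unsigned int flags;
	int attempts;
	long res;
};

/* Fake backend: fails with -EAGAIN on the first attempt, then succeeds. */
static long toy_do_io(struct toy_req *req)
{
	return (req->attempts++ == 0) ? TOY_EAGAIN : 4096;
}

/* Completion side: mark the retry instead of resubmitting inline. */
static void toy_complete(struct toy_req *req, long res)
{
	if (res == TOY_EAGAIN)
		req->flags |= TOY_F_REISSUE;
	else
		req->res = res;
}

/* Completion runner: the single consumer of TOY_F_REISSUE. */
static void toy_run_completion(struct toy_req *req)
{
	do {
		req->flags &= ~TOY_F_REISSUE;
		toy_complete(req, toy_do_io(req));
	} while (req->flags & TOY_F_REISSUE);
}

int main(void)
{
	struct toy_req req = { 0 };

	toy_run_completion(&req);
	printf("attempts=%d res=%ld\n", req.attempts, req.res);
	return 0;
}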