@@ -430,24 +430,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 	return req->link;
 }
 
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
-		return NULL;
-	return __io_prep_linked_timeout(req);
-}
-
-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
-{
-	io_queue_linked_timeout(__io_prep_linked_timeout(req));
-}
-
-static inline void io_arm_ltimeout(struct io_kiocb *req)
-{
-	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
-		__io_arm_ltimeout(req);
-}
-
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
@@ -500,7 +482,6 @@ static void io_prep_async_link(struct io_kiocb *req)
 
 static void io_queue_iowq(struct io_kiocb *req)
 {
-	struct io_kiocb *link = io_prep_linked_timeout(req);
 	struct io_uring_task *tctx = req->tctx;
 
 	BUG_ON(!tctx);
@@ -525,8 +506,6 @@ static void io_queue_iowq(struct io_kiocb *req)
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 	io_wq_enqueue(tctx->io_wq, &req->work);
-	if (link)
-		io_queue_linked_timeout(link);
 }
 
 static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
@@ -864,13 +843,26 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	struct io_ring_ctx *ctx = req->ctx;
 	bool posted;
 
+	/*
+	 * If multishot has already posted deferred completions, ensure that
+	 * those are flushed first before posting this one. If not, CQEs
+	 * could get reordered.
+	 */
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+		__io_submit_flush_completions(ctx);
+
 	lockdep_assert(!io_wq_current_is_worker());
 	lockdep_assert_held(&ctx->uring_lock);
 
-	__io_cq_lock(ctx);
-	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	if (!ctx->lockless_cq) {
+		spin_lock(&ctx->completion_lock);
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	}
+
 	ctx->submit_state.cq_flush = true;
-	__io_cq_unlock_post(ctx);
 	return posted;
 }
 
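The ordering concern described in the new comment can be modelled with a small userspace sketch: if some completions were batched for deferred posting, an out-of-band post has to flush that batch first, otherwise consumers would see the entries out of order. Everything below (the arrays, `flush_pending()`, `post_entry()`) is a hypothetical illustration, not io_uring code.

```c
#include <stdio.h>

#define QSIZE 16

static int cq[QSIZE];		/* what the consumer eventually sees */
static int cq_tail;

static int pending[QSIZE];	/* completions batched for deferred posting */
static int npending;

static void flush_pending(void)
{
	for (int i = 0; i < npending; i++)
		cq[cq_tail++] = pending[i];
	npending = 0;
}

/* Post one entry immediately, keeping order w.r.t. the batched entries. */
static void post_entry(int val)
{
	if (npending)		/* analogue of the wq_list_empty() check */
		flush_pending();
	cq[cq_tail++] = val;
}

int main(void)
{
	pending[npending++] = 1;	/* batched earlier, e.g. by multishot */
	pending[npending++] = 2;
	post_entry(3);			/* must not appear before 1 and 2 */

	for (int i = 0; i < cq_tail; i++)
		printf("cqe %d\n", cq[i]);
	return 0;
}
```

Running this prints 1, 2, 3 in that order; dropping the flush would let 3 jump ahead of the batched entries, which is the reordering the patch guards against.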
@@ -1058,21 +1050,22 @@ static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
 		node = node->next;
-		if (sync && last_ctx != req->ctx) {
+		if (last_ctx != req->ctx) {
 			if (last_ctx) {
-				flush_delayed_work(&last_ctx->fallback_work);
+				if (sync)
+					flush_delayed_work(&last_ctx->fallback_work);
 				percpu_ref_put(&last_ctx->refs);
 			}
 			last_ctx = req->ctx;
 			percpu_ref_get(&last_ctx->refs);
 		}
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
+		if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
+			schedule_delayed_work(&last_ctx->fallback_work, 1);
 	}
 
 	if (last_ctx) {
-		flush_delayed_work(&last_ctx->fallback_work);
+		if (sync)
+			flush_delayed_work(&last_ctx->fallback_work);
 		percpu_ref_put(&last_ctx->refs);
 	}
 }
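A standalone sketch of the pattern this hunk moves to, with made-up types and helpers (`struct ctx`, `ctx_get`, `ctx_flush`): walk a list of work items grouped by owning context, hold a reference on the context currently being fed regardless of mode, and only wait for its worker when a synchronous flush was requested.

```c
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	const char *name;
	int refs;
};

struct item {
	struct ctx *ctx;
	struct item *next;
};

static void ctx_get(struct ctx *c)   { c->refs++; }
static void ctx_put(struct ctx *c)   { c->refs--; }
static void ctx_flush(struct ctx *c) { printf("flush %s\n", c->name); }

static void fallback(struct item *node, bool sync)
{
	struct ctx *last_ctx = NULL;

	for (; node; node = node->next) {
		if (last_ctx != node->ctx) {
			if (last_ctx) {
				if (sync)	/* wait only on synchronous teardown */
					ctx_flush(last_ctx);
				ctx_put(last_ctx);
			}
			last_ctx = node->ctx;
			ctx_get(last_ctx);	/* pin the context while queueing to it */
		}
		printf("queue item to %s\n", last_ctx->name);
	}

	if (last_ctx) {
		if (sync)
			ctx_flush(last_ctx);
		ctx_put(last_ctx);
	}
}

int main(void)
{
	struct ctx a = { "a", 0 }, b = { "b", 0 };
	struct item i3 = { &b, NULL }, i2 = { &a, &i3 }, i1 = { &a, &i2 };

	fallback(&i1, true);
	return 0;
}
```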
@@ -1684,15 +1677,22 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
 	return !!req->file;
 }
 
+#define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
+
 static inline int __io_issue_sqe(struct io_kiocb *req,
 				 unsigned int issue_flags,
 				 const struct io_issue_def *def)
 {
 	const struct cred *creds = NULL;
+	struct io_kiocb *link = NULL;
 	int ret;
 
-	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
-		creds = override_creds(req->creds);
+	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
+		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+			creds = override_creds(req->creds);
+		if (req->flags & REQ_F_ARM_LTIMEOUT)
+			link = __io_prep_linked_timeout(req);
+	}
 
 	if (!def->audit_skip)
 		audit_uring_entry(req->opcode);
@@ -1702,8 +1702,12 @@ static inline int __io_issue_sqe(struct io_kiocb *req,
 	if (!def->audit_skip)
 		audit_uring_exit(!ret, ret);
 
-	if (creds)
-		revert_creds(creds);
+	if (unlikely(creds || link)) {
+		if (creds)
+			revert_creds(creds);
+		if (link)
+			io_queue_linked_timeout(link);
+	}
 
 	return ret;
 }
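The shape of this change can be seen outside the kernel with a minimal sketch: several rarely-set flags are folded into one mask so the hot path pays a single unlikely branch, and the matching cleanup is gated on one combined test as well. The flag names and helpers below are invented stand-ins, not io_uring APIs.

```c
#include <stdio.h>

#define F_CREDS        (1u << 0)
#define F_ARM_LTIMEOUT (1u << 1)
#define SLOW_FLAGS     (F_CREDS | F_ARM_LTIMEOUT)

struct fake_req {
	unsigned int flags;
};

static void set_creds(struct fake_req *req)    { printf("override creds (%#x)\n", req->flags); }
static void arm_ltimeout(struct fake_req *req) { printf("arm linked timeout (%#x)\n", req->flags); }

static int issue(struct fake_req *req)
{
	/* One branch covers every slow-path flag; plain requests skip it all. */
	if (req->flags & SLOW_FLAGS) {
		if (req->flags & F_CREDS)
			set_creds(req);
		if (req->flags & F_ARM_LTIMEOUT)
			arm_ltimeout(req);
	}
	/* ... issue the request here ... */
	return 0;
}

int main(void)
{
	struct fake_req fast = { .flags = 0 };
	struct fake_req slow = { .flags = F_ARM_LTIMEOUT };

	issue(&fast);	/* no slow-path work */
	issue(&slow);	/* arms the (fake) linked timeout */
	return 0;
}
```

Keeping both the setup and the teardown behind a combined check mirrors the patch: the fully plain request never touches creds or linked-timeout state at all.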
@@ -1729,7 +1733,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
 		ret = 0;
-		io_arm_ltimeout(req);
 
 		/* If the op doesn't have a file, we're not polling for it */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
@@ -1784,8 +1787,6 @@ void io_wq_submit_work(struct io_wq_work *work)
 	else
 		req_ref_get(req);
 
-	io_arm_ltimeout(req);
-
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
 fail:
@@ -1902,15 +1903,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 static void io_queue_async(struct io_kiocb *req, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout;
-
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
 		io_req_defer_failed(req, ret);
 		return;
 	}
 
-	linked_timeout = io_prep_linked_timeout(req);
-
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
 		io_kbuf_recycle(req, 0);
@@ -1923,9 +1920,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	case IO_APOLL_OK:
 		break;
 	}
-
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)