@@ -74,10 +74,10 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	if (!io_timeout_finish(timeout, data)) {
 		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
 			/* re-arm timer */
-			spin_lock_irq(&ctx->timeout_lock);
+			raw_spin_lock_irq(&ctx->timeout_lock);
 			list_add(&timeout->list, ctx->timeout_list.prev);
 			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-			spin_unlock_irq(&ctx->timeout_lock);
+			raw_spin_unlock_irq(&ctx->timeout_lock);
 			return;
 		}
 	}
@@ -109,7 +109,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 	u32 seq;
 	struct io_timeout *timeout, *tmp;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
@@ -134,7 +134,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 		io_kill_timeout(req, 0);
 	}
 	ctx->cq_last_tm_flush = seq;
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 }
 
 static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
@@ -200,9 +200,9 @@ void io_disarm_next(struct io_kiocb *req)
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		spin_lock_irq(&ctx->timeout_lock);
+		raw_spin_lock_irq(&ctx->timeout_lock);
 		link = io_disarm_linked_timeout(req);
-		spin_unlock_irq(&ctx->timeout_lock);
+		raw_spin_unlock_irq(&ctx->timeout_lock);
 		if (link)
 			io_req_queue_tw_complete(link, -ECANCELED);
 	}
@@ -238,11 +238,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->timeout_lock, flags);
+	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
 	list_del_init(&timeout->list);
 	atomic_set(&req->ctx->cq_timeouts,
 		atomic_read(&req->ctx->cq_timeouts) + 1);
-	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
 	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
 		req_set_fail(req);
@@ -285,9 +285,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
 	struct io_kiocb *req;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	req = io_timeout_extract(ctx, cd);
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -330,7 +330,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->timeout_lock, flags);
+	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
 	prev = timeout->head;
 	timeout->head = NULL;
 
@@ -345,7 +345,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	}
 	list_del(&timeout->list);
 	timeout->prev = prev;
-	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
 	req->io_task_work.func = io_req_task_link_timeout;
 	io_req_task_work_add(req);
@@ -472,12 +472,12 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	} else {
 		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
 
-		spin_lock_irq(&ctx->timeout_lock);
+		raw_spin_lock_irq(&ctx->timeout_lock);
 		if (tr->ltimeout)
 			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
 		else
 			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
-		spin_unlock_irq(&ctx->timeout_lock);
+		raw_spin_unlock_irq(&ctx->timeout_lock);
 	}
 
 	if (ret < 0)
@@ -572,7 +572,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	struct list_head *entry;
 	u32 tail, off = timeout->off;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 
 	/*
 	 * sqe->off holds how many events that need to occur for this
@@ -611,7 +611,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	list_add(&timeout->list, entry);
 	data->timer.function = io_timeout_fn;
 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
 
@@ -620,7 +620,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_ring_ctx *ctx = req->ctx;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	/*
 	 * If the back reference is NULL, then our linked request finished
 	 * before we got a chance to setup the timer
@@ -633,7 +633,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
 				data->mode);
 		list_add_tail(&timeout->list, &ctx->ltimeout_list);
 	}
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 	/* drop submission reference */
 	io_put_req(req);
 }
@@ -668,15 +668,15 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
 	 * timeout_lock first to keep locking ordering.
 	 */
 	spin_lock(&ctx->completion_lock);
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
 
 		if (io_match_task(req, tctx, cancel_all) &&
 		    io_kill_timeout(req, -ECANCELED))
 			canceled++;
 	}
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);
 	return canceled != 0;
 }
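
Note on context (not part of the diff above): these hunks only convert the timeout.c call sites from the spin_lock_* API to the raw_spin_lock_* API. Two of the functions touched, io_timeout_fn() and io_link_timeout_fn(), are hrtimer handlers that take the lock with the irqsave variants, which is the usual reason for this kind of conversion: a raw spinlock remains a true spinning lock even on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock and must not be taken from such contexts. For these hunks to build, the lock itself would also have to be re-declared and re-initialized as a raw spinlock outside this file. The sketch below is an assumed companion change, not part of this excerpt; the exact field placement and init site are guesses.

/* assumed: declaration change in the io_uring context structure */
struct io_ring_ctx {
	/* ... other fields ... */
	raw_spinlock_t		timeout_lock;	/* was: spinlock_t timeout_lock; */
	/* ... other fields ... */
};

/* assumed: matching init change wherever the ring context is set up */
raw_spin_lock_init(&ctx->timeout_lock);	/* was: spin_lock_init(&ctx->timeout_lock); */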