@@ -74,10 +74,10 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
74
74
if (!io_timeout_finish (timeout , data )) {
75
75
if (io_req_post_cqe (req , - ETIME , IORING_CQE_F_MORE )) {
76
76
/* re-arm timer */
77
- spin_lock_irq (& ctx -> timeout_lock );
77
+ raw_spin_lock_irq (& ctx -> timeout_lock );
78
78
list_add (& timeout -> list , ctx -> timeout_list .prev );
79
79
hrtimer_start (& data -> timer , timespec64_to_ktime (data -> ts ), data -> mode );
80
- spin_unlock_irq (& ctx -> timeout_lock );
80
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
81
81
return ;
82
82
}
83
83
}
@@ -109,7 +109,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
109
109
u32 seq ;
110
110
struct io_timeout * timeout , * tmp ;
111
111
112
- spin_lock_irq (& ctx -> timeout_lock );
112
+ raw_spin_lock_irq (& ctx -> timeout_lock );
113
113
seq = ctx -> cached_cq_tail - atomic_read (& ctx -> cq_timeouts );
114
114
115
115
list_for_each_entry_safe (timeout , tmp , & ctx -> timeout_list , list ) {
@@ -134,7 +134,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
134
134
io_kill_timeout (req , 0 );
135
135
}
136
136
ctx -> cq_last_tm_flush = seq ;
137
- spin_unlock_irq (& ctx -> timeout_lock );
137
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
138
138
}
139
139
140
140
static void io_req_tw_fail_links (struct io_kiocb * link , struct io_tw_state * ts )
@@ -200,9 +200,9 @@ void io_disarm_next(struct io_kiocb *req)
200
200
} else if (req -> flags & REQ_F_LINK_TIMEOUT ) {
201
201
struct io_ring_ctx * ctx = req -> ctx ;
202
202
203
- spin_lock_irq (& ctx -> timeout_lock );
203
+ raw_spin_lock_irq (& ctx -> timeout_lock );
204
204
link = io_disarm_linked_timeout (req );
205
- spin_unlock_irq (& ctx -> timeout_lock );
205
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
206
206
if (link )
207
207
io_req_queue_tw_complete (link , - ECANCELED );
208
208
}
@@ -238,11 +238,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
238
238
struct io_ring_ctx * ctx = req -> ctx ;
239
239
unsigned long flags ;
240
240
241
- spin_lock_irqsave (& ctx -> timeout_lock , flags );
241
+ raw_spin_lock_irqsave (& ctx -> timeout_lock , flags );
242
242
list_del_init (& timeout -> list );
243
243
atomic_set (& req -> ctx -> cq_timeouts ,
244
244
atomic_read (& req -> ctx -> cq_timeouts ) + 1 );
245
- spin_unlock_irqrestore (& ctx -> timeout_lock , flags );
245
+ raw_spin_unlock_irqrestore (& ctx -> timeout_lock , flags );
246
246
247
247
if (!(data -> flags & IORING_TIMEOUT_ETIME_SUCCESS ))
248
248
req_set_fail (req );
@@ -285,9 +285,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
285
285
{
286
286
struct io_kiocb * req ;
287
287
288
- spin_lock_irq (& ctx -> timeout_lock );
288
+ raw_spin_lock_irq (& ctx -> timeout_lock );
289
289
req = io_timeout_extract (ctx , cd );
290
- spin_unlock_irq (& ctx -> timeout_lock );
290
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
291
291
292
292
if (IS_ERR (req ))
293
293
return PTR_ERR (req );
@@ -330,7 +330,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
330
330
struct io_ring_ctx * ctx = req -> ctx ;
331
331
unsigned long flags ;
332
332
333
- spin_lock_irqsave (& ctx -> timeout_lock , flags );
333
+ raw_spin_lock_irqsave (& ctx -> timeout_lock , flags );
334
334
prev = timeout -> head ;
335
335
timeout -> head = NULL ;
336
336
@@ -345,7 +345,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
345
345
}
346
346
list_del (& timeout -> list );
347
347
timeout -> prev = prev ;
348
- spin_unlock_irqrestore (& ctx -> timeout_lock , flags );
348
+ raw_spin_unlock_irqrestore (& ctx -> timeout_lock , flags );
349
349
350
350
req -> io_task_work .func = io_req_task_link_timeout ;
351
351
io_req_task_work_add (req );
@@ -472,12 +472,12 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
472
472
} else {
473
473
enum hrtimer_mode mode = io_translate_timeout_mode (tr -> flags );
474
474
475
- spin_lock_irq (& ctx -> timeout_lock );
475
+ raw_spin_lock_irq (& ctx -> timeout_lock );
476
476
if (tr -> ltimeout )
477
477
ret = io_linked_timeout_update (ctx , tr -> addr , & tr -> ts , mode );
478
478
else
479
479
ret = io_timeout_update (ctx , tr -> addr , & tr -> ts , mode );
480
- spin_unlock_irq (& ctx -> timeout_lock );
480
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
481
481
}
482
482
483
483
if (ret < 0 )
@@ -572,7 +572,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
572
572
struct list_head * entry ;
573
573
u32 tail , off = timeout -> off ;
574
574
575
- spin_lock_irq (& ctx -> timeout_lock );
575
+ raw_spin_lock_irq (& ctx -> timeout_lock );
576
576
577
577
/*
578
578
* sqe->off holds how many events that need to occur for this
@@ -611,7 +611,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
611
611
list_add (& timeout -> list , entry );
612
612
data -> timer .function = io_timeout_fn ;
613
613
hrtimer_start (& data -> timer , timespec64_to_ktime (data -> ts ), data -> mode );
614
- spin_unlock_irq (& ctx -> timeout_lock );
614
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
615
615
return IOU_ISSUE_SKIP_COMPLETE ;
616
616
}
617
617
@@ -620,7 +620,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
620
620
struct io_timeout * timeout = io_kiocb_to_cmd (req , struct io_timeout );
621
621
struct io_ring_ctx * ctx = req -> ctx ;
622
622
623
- spin_lock_irq (& ctx -> timeout_lock );
623
+ raw_spin_lock_irq (& ctx -> timeout_lock );
624
624
/*
625
625
* If the back reference is NULL, then our linked request finished
626
626
* before we got a chance to setup the timer
@@ -633,7 +633,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
633
633
data -> mode );
634
634
list_add_tail (& timeout -> list , & ctx -> ltimeout_list );
635
635
}
636
- spin_unlock_irq (& ctx -> timeout_lock );
636
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
637
637
/* drop submission reference */
638
638
io_put_req (req );
639
639
}
@@ -668,15 +668,15 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
668
668
* timeout_lock first to keep locking ordering.
669
669
*/
670
670
spin_lock (& ctx -> completion_lock );
671
- spin_lock_irq (& ctx -> timeout_lock );
671
+ raw_spin_lock_irq (& ctx -> timeout_lock );
672
672
list_for_each_entry_safe (timeout , tmp , & ctx -> timeout_list , list ) {
673
673
struct io_kiocb * req = cmd_to_io_kiocb (timeout );
674
674
675
675
if (io_match_task (req , tctx , cancel_all ) &&
676
676
io_kill_timeout (req , - ECANCELED ))
677
677
canceled ++ ;
678
678
}
679
- spin_unlock_irq (& ctx -> timeout_lock );
679
+ raw_spin_unlock_irq (& ctx -> timeout_lock );
680
680
spin_unlock (& ctx -> completion_lock );
681
681
return canceled != 0 ;
682
682
}
0 commit comments