@@ -2350,6 +2350,34 @@ static bool current_pending_io(void)
 	return percpu_counter_read_positive(&tctx->inflight);
 }
 
+static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
+{
+	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
+
+	WRITE_ONCE(iowq->hit_timeout, 1);
+	wake_up_process(iowq->wq.private);
+	return HRTIMER_NORESTART;
+}
+
+static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
+				      clockid_t clock_id)
+{
+	iowq->hit_timeout = 0;
+	hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
+	iowq->t.function = io_cqring_timer_wakeup;
+	hrtimer_set_expires_range_ns(&iowq->t, iowq->timeout, 0);
+	hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
+
+	if (!READ_ONCE(iowq->hit_timeout))
+		schedule();
+
+	hrtimer_cancel(&iowq->t);
+	destroy_hrtimer_on_stack(&iowq->t);
+	__set_current_state(TASK_RUNNING);
+
+	return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
+}
+
 static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {
@@ -2362,11 +2390,10 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	 */
 	if (current_pending_io())
 		current->in_iowait = 1;
-	if (iowq->timeout == KTIME_MAX)
+	if (iowq->timeout != KTIME_MAX)
+		ret = io_cqring_schedule_timeout(iowq, ctx->clockid);
+	else
 		schedule();
-	else if (!schedule_hrtimeout_range_clock(&iowq->timeout, 0,
-						 HRTIMER_MODE_ABS, ctx->clockid))
-		ret = -ETIME;
 	current->in_iowait = 0;
 	return ret;
 }
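
A minimal user-space sketch of the behaviour this kernel-side path serves; it is not part of the commit and assumes only the standard liburing helpers io_uring_queue_init(), io_uring_wait_cqe_timeout() and io_uring_queue_exit(). Waiting on an empty completion queue with a bounded timeout is exactly the case where io_cqring_schedule_timeout() arms the on-stack hrtimer above and the wait returns -ETIME once it fires.

    /*
     * Hypothetical illustration only -- not part of this commit.
     * Waits on an empty CQ with a 1-second timeout and expects -ETIME.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <liburing.h>

    int main(void)
    {
    	struct io_uring ring;
    	struct io_uring_cqe *cqe;
    	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    	int ret;

    	ret = io_uring_queue_init(8, &ring, 0);
    	if (ret < 0) {
    		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
    		return 1;
    	}

    	/* No requests were submitted, so this wait can only time out. */
    	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
    	if (ret == -ETIME)
    		printf("wait timed out as expected\n");

    	io_uring_queue_exit(&ring);
    	return 0;
    }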