@@ -2493,11 +2493,20 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static bool current_pending_io(void)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	if (!tctx)
+		return false;
+	return percpu_counter_read_positive(&tctx->inflight);
+}
+
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq)
 {
-	int token, ret;
+	int io_wait, ret;
 
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
@@ -2511,17 +2520,19 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 		return 0;
 
 	/*
-	 * Use io_schedule_prepare/finish, so cpufreq can take into account
-	 * that the task is waiting for IO - turns out to be important for low
-	 * QD IO.
+	 * Mark us as being in io_wait if we have pending requests, so cpufreq
+	 * can take into account that the task is waiting for IO - turns out
+	 * to be important for low QD IO.
 	 */
-	token = io_schedule_prepare();
+	io_wait = current->in_iowait;
+	if (current_pending_io())
+		current->in_iowait = 1;
 	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
 		schedule();
 	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
 		ret = -ETIME;
-	io_schedule_finish(token);
+	current->in_iowait = io_wait;
 	return ret;
 }
 
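For readers outside the kernel tree, the change boils down to: remember the task's current iowait flag, set it only when the ring actually has requests inflight, sleep, and restore the old value on wakeup. The sketch below is a minimal standalone userspace model of that pattern, not kernel code; `task_model`, `pending_io`, and `wait_for_events` are stand-in names for what the real patch does with `current->in_iowait` and the per-task `inflight` percpu counter shown in the diff above.

```c
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the bits of task/tctx state the patch touches. */
struct task_model {
	int in_iowait;   /* models current->in_iowait */
	long inflight;   /* models tctx->inflight */
};

/* Models current_pending_io(): any requests still inflight? */
static bool pending_io(const struct task_model *t)
{
	return t->inflight > 0;
}

static void wait_for_events(struct task_model *t)
{
	int saved = t->in_iowait;        /* save the old flag */

	if (pending_io(t))
		t->in_iowait = 1;        /* only account iowait when work is inflight */

	/* schedule() / schedule_hrtimeout() would sleep here */

	t->in_iowait = saved;            /* restore on wakeup */
}

int main(void)
{
	struct task_model t = { .in_iowait = 0, .inflight = 2 };

	wait_for_events(&t);
	printf("in_iowait restored to %d\n", t.in_iowait);
	return 0;
}
```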