Skip to content

Commit 3030fd4

Browse files
committed
io-wq: remove spin-for-work optimization
Andres reports that buffered IO seems to suck up more cycles than we would like, and he narrowed it down to the fact that the io-wq workers will briefly spin for more work on completion of a work item. This was a win on the networking side, but apparently some other cases take a hit because of it. Remove the optimization to avoid burning more CPU than we have to for disk IO.

Reported-by: Andres Freund <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent bdcd3ea commit 3030fd4

File tree

1 file changed

+0
-19
lines changed

1 file changed

+0
-19
lines changed

fs/io-wq.c

Lines changed: 0 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -535,42 +535,23 @@ static void io_worker_handle_work(struct io_worker *worker)
535535
} while (1);
536536
}
537537

538-
static inline void io_worker_spin_for_work(struct io_wqe *wqe)
539-
{
540-
int i = 0;
541-
542-
while (++i < 1000) {
543-
if (io_wqe_run_queue(wqe))
544-
break;
545-
if (need_resched())
546-
break;
547-
cpu_relax();
548-
}
549-
}
550-
551538
static int io_wqe_worker(void *data)
552539
{
553540
struct io_worker *worker = data;
554541
struct io_wqe *wqe = worker->wqe;
555542
struct io_wq *wq = wqe->wq;
556-
bool did_work;
557543

558544
io_worker_start(wqe, worker);
559545

560-
did_work = false;
561546
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
562547
set_current_state(TASK_INTERRUPTIBLE);
563548
loop:
564-
if (did_work)
565-
io_worker_spin_for_work(wqe);
566549
spin_lock_irq(&wqe->lock);
567550
if (io_wqe_run_queue(wqe)) {
568551
__set_current_state(TASK_RUNNING);
569552
io_worker_handle_work(worker);
570-
did_work = true;
571553
goto loop;
572554
}
573-
did_work = false;
574555
/* drops the lock on success, retry */
575556
if (__io_worker_idle(wqe, worker)) {
576557
__release(&wqe->lock);

0 commit comments

Comments (0)