Commit e126859

spi: Only defer to thread for cleanup when needed
Currently we always defer idling of controllers to the SPI thread. The goal is to ensure that teardown which is not suitable for atomic context runs in an appropriate context, and to batch up more expensive teardown operations when the system is under higher load, allowing more work to be started before the SPI thread is scheduled. However, when the controller does not require any substantial work to idle there is no need to do this; we can instead save the context switch and mark the controller as idle immediately. This is particularly useful for systems where there is frequent but not constant activity.

Signed-off-by: Mark Brown <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Mark Brown <[email protected]>
1 parent 2ae3de1 commit e126859
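
For readers unfamiliar with the runtime-PM calls that the new spi_idle_runtime_pm() helper wraps: they follow the standard autosuspend idiom of marking the device recently busy, then dropping a usage-count reference with an autosuspend-aware put. A minimal, illustrative sketch of that idiom follows; the function name example_mark_idle and the assumption that autosuspend was enabled at probe time are editorial additions, not part of this commit.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative only: the usual runtime-PM autosuspend idle sequence.
 * Assumes the caller enabled autosuspend earlier, e.g. with
 * pm_runtime_use_autosuspend() and pm_runtime_set_autosuspend_delay().
 */
static void example_mark_idle(struct device *dev)
{
	/* Restart the autosuspend timer so the device is not suspended too eagerly. */
	pm_runtime_mark_last_busy(dev);
	/* Drop our usage count; the device may suspend once the delay expires. */
	pm_runtime_put_autosuspend(dev);
}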

File tree

1 file changed: +19 −7 lines

drivers/spi/spi.c

Lines changed: 19 additions & 7 deletions
@@ -1336,6 +1336,14 @@ void spi_finalize_current_transfer(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+	if (ctlr->auto_runtime_pm) {
+		pm_runtime_mark_last_busy(ctlr->dev.parent);
+		pm_runtime_put_autosuspend(ctlr->dev.parent);
+	}
+}
+
 /**
  * __spi_pump_messages - function which processes spi message queue
  * @ctlr: controller to process queue for
@@ -1380,10 +1388,17 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		return;
 	}
 
-	/* Only do teardown in the thread */
+	/* Defer any non-atomic teardown to the thread */
 	if (!in_kthread) {
-		kthread_queue_work(ctlr->kworker,
-				   &ctlr->pump_messages);
+		if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+		    !ctlr->unprepare_transfer_hardware) {
+			spi_idle_runtime_pm(ctlr);
+			ctlr->busy = false;
+			trace_spi_controller_idle(ctlr);
+		} else {
+			kthread_queue_work(ctlr->kworker,
+					   &ctlr->pump_messages);
+		}
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
@@ -1400,10 +1415,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	    ctlr->unprepare_transfer_hardware(ctlr))
 		dev_err(&ctlr->dev,
 			"failed to unprepare transfer hardware\n");
-	if (ctlr->auto_runtime_pm) {
-		pm_runtime_mark_last_busy(ctlr->dev.parent);
-		pm_runtime_put_autosuspend(ctlr->dev.parent);
-	}
+	spi_idle_runtime_pm(ctlr);
 	trace_spi_controller_idle(ctlr);
 
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
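
As a usage note, the fast path added above only applies when a controller has nothing non-atomic to tear down: no core-allocated dummy rx/tx buffers and no unprepare_transfer_hardware() callback. The sketch below shows the shape of a controller registration that would qualify; the driver name, probe function, and transfer hook are invented for illustration and are not part of this commit.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Hypothetical per-transfer hook; a real driver would drive its FIFO/DMA here. */
static int example_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	ctlr = spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	/* Let the SPI core take runtime-PM references around message processing. */
	ctlr->auto_runtime_pm = true;
	ctlr->transfer_one = example_transfer_one;
	/*
	 * No ->unprepare_transfer_hardware() and no MUST_TX/MUST_RX flags,
	 * so idling this controller needs no sleeping work and the queue can
	 * be marked idle without a round trip through the SPI kthread.
	 */

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);
	return ret;
}

static struct platform_driver example_spi_driver = {
	.probe	= example_probe,
	.driver	= {
		.name = "example-fast-idle-spi",
	},
};
module_platform_driver(example_spi_driver);

MODULE_LICENSE("GPL");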
