@@ -839,17 +839,20 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer
 	u32 command1, command2, speed = t->speed_hz;
 	u8 bits_per_word = t->bits_per_word;
 	u32 tx_tap = 0, rx_tap = 0;
+	unsigned long flags;
 	int req_mode;
 
 	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
 		clk_set_rate(tqspi->clk, speed);
 		tqspi->cur_speed = speed;
 	}
 
+	spin_lock_irqsave(&tqspi->lock, flags);
 	tqspi->cur_pos = 0;
 	tqspi->cur_rx_pos = 0;
 	tqspi->cur_tx_pos = 0;
 	tqspi->curr_xfer = t;
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 
 	if (is_first_of_msg) {
 		tegra_qspi_mask_clear_irq(tqspi);
@@ -1158,6 +1161,7 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 	u32 address_value = 0;
 	u32 cmd_config = 0, addr_config = 0;
 	u8 cmd_value = 0, val = 0;
+	unsigned long flags;
 
 	/* Enable Combined sequence mode */
 	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
@@ -1261,13 +1265,17 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 			tegra_qspi_transfer_end(spi);
 			spi_transfer_delay_exec(xfer);
 		}
+		spin_lock_irqsave(&tqspi->lock, flags);
 		tqspi->curr_xfer = NULL;
+		spin_unlock_irqrestore(&tqspi->lock, flags);
 		transfer_phase++;
 	}
 	ret = 0;
 
exit:
+	spin_lock_irqsave(&tqspi->lock, flags);
 	tqspi->curr_xfer = NULL;
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 	msg->status = ret;
 
 	return ret;
@@ -1280,6 +1288,7 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 	struct spi_transfer *transfer;
 	bool is_first_msg = true;
 	int ret = 0, val = 0;
+	unsigned long flags;
 
 	msg->status = 0;
 	msg->actual_length = 0;
@@ -1360,7 +1369,9 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 		msg->actual_length += xfer->len + dummy_bytes;
 
complete_xfer:
+		spin_lock_irqsave(&tqspi->lock, flags);
 		tqspi->curr_xfer = NULL;
+		spin_unlock_irqrestore(&tqspi->lock, flags);
 
 		if (ret < 0) {
 			tegra_qspi_transfer_end(spi);
@@ -1440,10 +1451,16 @@ static int tegra_qspi_transfer_one_message(struct spi_controller *host,
 
 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
 {
-	struct spi_transfer *t = tqspi->curr_xfer;
+	struct spi_transfer *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tqspi->lock, flags);
+	t = tqspi->curr_xfer;
+
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
 
 	if (tqspi->tx_status || tqspi->rx_status) {
 		tegra_qspi_handle_error(tqspi);
@@ -1474,7 +1491,7 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
 
 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 {
-	struct spi_transfer *t = tqspi->curr_xfer;
+	struct spi_transfer *t;
 	unsigned int total_fifo_words;
 	unsigned long flags;
 	long wait_status;
@@ -1513,6 +1530,12 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	}
 
 	spin_lock_irqsave(&tqspi->lock, flags);
+	t = tqspi->curr_xfer;
+
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
 
 	if (num_errors) {
 		tegra_qspi_dma_unmap_xfer(tqspi, t);
@@ -1552,15 +1575,33 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 
 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 {
 	struct tegra_qspi *tqspi = context_data;
+	unsigned long flags;
+	u32 status;
+
+	/*
+	 * Read transfer status to check if interrupt was triggered by transfer
+	 * completion
+	 */
+	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
 
 	/*
 	 * Occasionally the IRQ thread takes a long time to wake up (usually
 	 * when the CPU that it's running on is excessively busy) and we have
 	 * already reached the timeout before and cleaned up the timed out
 	 * transfer. Avoid any processing in that case and bail out early.
+	 *
+	 * If no transfer is in progress, check if this was a real interrupt
+	 * that the timeout handler already processed, or a spurious one.
 	 */
-	if (!tqspi->curr_xfer)
-		return IRQ_NONE;
+	spin_lock_irqsave(&tqspi->lock, flags);
+	if (!tqspi->curr_xfer) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		/* Spurious interrupt - transfer not ready */
+		if (!(status & QSPI_RDY))
+			return IRQ_NONE;
+		/* Real interrupt, already handled by timeout path */
+		return IRQ_HANDLED;
+	}
 
 	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
@@ -1571,7 +1612,14 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 	tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
 
 	tegra_qspi_mask_clear_irq(tqspi);
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 
+	/*
+	 * Lock is released here but handlers safely re-check curr_xfer under
+	 * lock before dereferencing.
+	 * DMA handler also needs to sleep in wait_for_completion_*(), which
+	 * cannot be done while holding spinlock.
+	 */
 	if (!tqspi->is_curr_dma_xfer)
 		return handle_cpu_based_xfer(tqspi);
 
0 commit comments