diff --git a/drivers/flash/flash_mspi_nor.c b/drivers/flash/flash_mspi_nor.c
index e297b01f5d9..b7ca6341d3a 100644
--- a/drivers/flash/flash_mspi_nor.c
+++ b/drivers/flash/flash_mspi_nor.c
@@ -567,15 +567,20 @@ static int default_io_mode(const struct device *dev)
 	enum mspi_io_mode io_mode = dev_config->mspi_nor_cfg.io_mode;
 	int rc = 0;
 
-	/* For Quad 1-1-4 and 1-4-4, entering or leaving mode is defined in JEDEC216 BFP DW15 QER */
-	if (io_mode == MSPI_IO_MODE_SINGLE) {
-		rc = quad_enable_set(dev, false);
-	} else if ((io_mode == MSPI_IO_MODE_QUAD_1_1_4) || (io_mode == MSPI_IO_MODE_QUAD_1_4_4)) {
-		rc = quad_enable_set(dev, true);
-	}
+	if (dev_config->dw15_qer != JESD216_DW15_QER_VAL_NONE) {
+		/* For Quad 1-1-4 and 1-4-4, entering or leaving mode is defined
+		 * in JESD216 BFP DW15 QER
+		 */
+		if (io_mode == MSPI_IO_MODE_SINGLE) {
+			rc = quad_enable_set(dev, false);
+		} else if (io_mode == MSPI_IO_MODE_QUAD_1_1_4 ||
+			   io_mode == MSPI_IO_MODE_QUAD_1_4_4) {
+			rc = quad_enable_set(dev, true);
+		}
 
-	if (rc < 0) {
-		LOG_ERR("Failed to modify Quad Enable bit: %d", rc);
+		if (rc < 0) {
+			LOG_ERR("Failed to modify Quad Enable bit: %d", rc);
+		}
 	}
 
 	if ((dev_config->quirks != NULL) && (dev_config->quirks->post_switch_mode != NULL)) {
@@ -646,8 +651,10 @@ static int flash_chip_init(const struct device *dev)
 	/* Some chips reuse RESET pin for data in Quad modes:
	 * force single line mode before resetting.
	 */
-	if ((io_mode == MSPI_IO_MODE_SINGLE) || (io_mode == MSPI_IO_MODE_QUAD_1_1_4) ||
-	    (io_mode == MSPI_IO_MODE_QUAD_1_4_4)) {
+	if (dev_config->dw15_qer != JESD216_DW15_QER_VAL_NONE &&
+	    (io_mode == MSPI_IO_MODE_SINGLE ||
+	     io_mode == MSPI_IO_MODE_QUAD_1_1_4 ||
+	     io_mode == MSPI_IO_MODE_QUAD_1_4_4)) {
 		rc = quad_enable_set(dev, false);
 
 		if (rc < 0) {
diff --git a/drivers/flash/nrf_qspi_nor.c b/drivers/flash/nrf_qspi_nor.c
index 12eeb3f4a3e..34b36b4d7e3 100644
--- a/drivers/flash/nrf_qspi_nor.c
+++ b/drivers/flash/nrf_qspi_nor.c
@@ -41,7 +41,7 @@ struct qspi_nor_data {
 	 */
 	volatile bool ready;
 #endif /* CONFIG_MULTITHREADING */
-	bool xip_enabled;
+	uint32_t xip_users;
 };
 
 struct qspi_nor_config {
@@ -313,7 +313,7 @@ static void qspi_acquire(const struct device *dev)
 
 	qspi_lock(dev);
 
-	if (!dev_data->xip_enabled) {
+	if (dev_data->xip_users == 0) {
 		qspi_clock_div_change();
 
 		pm_device_busy_set(dev);
@@ -331,7 +331,7 @@ static void qspi_release(const struct device *dev)
 	deactivate = atomic_dec(&dev_data->usage_count) == 1;
 #endif
 
-	if (!dev_data->xip_enabled) {
+	if (dev_data->xip_users == 0) {
 		qspi_clock_div_restore();
 
 		if (deactivate) {
@@ -1344,35 +1344,54 @@ static int qspi_nor_pm_action(const struct device *dev,
 }
 #endif /* CONFIG_PM_DEVICE */
 
+static void on_xip_enable(const struct device *dev)
+{
+#if NRF_QSPI_HAS_XIPEN
+	nrf_qspi_xip_set(NRF_QSPI, true);
+#endif
+	(void)nrfx_qspi_activate(false);
+}
+
+static void on_xip_disable(const struct device *dev)
+{
+	/* It turns out that when the QSPI peripheral is deactivated
+	 * after a XIP transaction, it cannot be later successfully
+	 * reactivated and an attempt to perform another XIP transaction
+	 * results in the CPU being hung; even a debug session cannot be
+	 * started then and the SoC has to be recovered.
+	 * As a workaround, at least until the cause of such behavior
+	 * is fully clarified, perform a simple non-XIP transaction
+	 * (a read of the status register) before deactivating the QSPI.
+	 * This prevents the issue from occurring.
+	 */
+	(void)qspi_rdsr(dev, 1);
+
+#if NRF_QSPI_HAS_XIPEN
+	nrf_qspi_xip_set(NRF_QSPI, false);
+#endif
+}
+
 void z_impl_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable)
 {
 	struct qspi_nor_data *dev_data = dev->data;
 
-	if (dev_data->xip_enabled == enable) {
-		return;
-	}
-
 	qspi_acquire(dev);
 
-#if NRF_QSPI_HAS_XIPEN
-	nrf_qspi_xip_set(NRF_QSPI, enable);
-#endif
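+	/* Reference-count the XIP users: the peripheral is reconfigured only
+	 * when the first user enables XIP or when the last one disables it,
+	 * so independent users may nest these calls, as long as every enable
+	 * is balanced by exactly one disable.
+	 */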
 	if (enable) {
-		(void)nrfx_qspi_activate(false);
+		if (dev_data->xip_users == 0) {
+			on_xip_enable(dev);
+		}
+
+		++dev_data->xip_users;
+	} else if (dev_data->xip_users == 0) {
+		LOG_ERR("Unbalanced XIP disabling");
 	} else {
-		/* It turns out that when the QSPI peripheral is deactivated
-		 * after a XIP transaction, it cannot be later successfully
-		 * reactivated and an attempt to perform another XIP transaction
-		 * results in the CPU being hung; even a debug session cannot be
-		 * started then and the SoC has to be recovered.
-		 * As a workaround, at least until the cause of such behavior
-		 * is fully clarified, perform a simple non-XIP transaction
-		 * (a read of the status register) before deactivating the QSPI.
-		 * This prevents the issue from occurring.
-		 */
-		(void)qspi_rdsr(dev, 1);
+		--dev_data->xip_users;
+
+		if (dev_data->xip_users == 0) {
+			on_xip_disable(dev);
+		}
 	}
 
-	dev_data->xip_enabled = enable;
-
 	qspi_release(dev);
 }
diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c
index 39cf463f4be..adc59e765b1 100644
--- a/drivers/mspi/mspi_dw.c
+++ b/drivers/mspi/mspi_dw.c
@@ -16,12 +16,9 @@
 #include
 #include "mspi_dw.h"
-#include "mspi_dw_vendor_specific.h"
 
 LOG_MODULE_REGISTER(mspi_dw, CONFIG_MSPI_LOG_LEVEL);
 
-#define DUMMY_BYTE 0xAA
-
 #if defined(CONFIG_MSPI_XIP)
 struct xip_params {
 	uint32_t read_cmd;
@@ -104,6 +101,7 @@ DEFINE_MM_REG_RD(rxflr, 0x24)
 DEFINE_MM_REG_RD(sr, 0x28)
 DEFINE_MM_REG_WR(imr, 0x2c)
 DEFINE_MM_REG_RD(isr, 0x30)
+DEFINE_MM_REG_RD(risr, 0x34)
 DEFINE_MM_REG_RD_WR(dr, 0x60)
 DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4)
 
@@ -116,6 +114,8 @@ DEFINE_MM_REG_WR(xip_write_wrap_inst, 0x144)
 DEFINE_MM_REG_WR(xip_write_ctrl, 0x148)
 #endif
 
+#include "mspi_dw_vendor_specific.h"
+
 static void tx_data(const struct device *dev,
 		    const struct mspi_xfer_packet *packet)
 {
@@ -150,6 +150,9 @@ static void tx_data(const struct device *dev,
 		write_dr(dev, data);
 
 		if (buf_pos >= buf_end) {
+			/* Set the threshold to 0 to get the next interrupt
+			 * when the FIFO is completely emptied.
+			 */
 			write_txftlr(dev, 0);
 			break;
 		}
@@ -163,35 +166,38 @@ static void tx_data(const struct device *dev,
 	dev_data->buf_pos = (uint8_t *)buf_pos;
 }
 
-static bool make_rx_cycles(const struct device *dev)
+static bool tx_dummy_bytes(const struct device *dev)
 {
 	struct mspi_dw_data *dev_data = dev->data;
 	const struct mspi_dw_config *dev_config = dev->config;
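+	/* Calculate the room currently available in the TX FIFO: its total
+	 * depth minus the fill level read from the TXFLR register.
+	 */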
+	uint8_t fifo_room = dev_config->tx_fifo_depth_minus_1 + 1
+			  - FIELD_GET(TXFLR_TXTFL_MASK, read_txflr(dev));
 	uint16_t dummy_bytes = dev_data->dummy_bytes;
-	/* See tx_data().
-	 */
-	uint32_t room = 1;
-	uint8_t tx_fifo_depth = dev_config->tx_fifo_depth_minus_1 + 1;
+	const uint8_t dummy_val = 0;
 
-	do {
-		write_dr(dev, DUMMY_BYTE);
+	if (dummy_bytes > fifo_room) {
+		dev_data->dummy_bytes = dummy_bytes - fifo_room;
 
-		--dummy_bytes;
-		if (!dummy_bytes) {
-			dev_data->dummy_bytes = 0;
-			return true;
-		}
+		do {
+			write_dr(dev, dummy_val);
+		} while (--fifo_room);
 
-		if (--room == 0) {
-			room = tx_fifo_depth
-			     - FIELD_GET(TXFLR_TXTFL_MASK, read_txflr(dev));
-		}
-	} while (room);
+		return false;
+	}
 
-	dev_data->dummy_bytes = dummy_bytes;
-	return false;
+	do {
+		write_dr(dev, dummy_val);
+	} while (--dummy_bytes);
+
+	/* Set the threshold to 0 to get the next interrupt when the FIFO is
+	 * completely emptied.
+	 */
+	write_txftlr(dev, 0);
+
+	return true;
 }
 
-static void read_rx_fifo(const struct device *dev,
+static bool read_rx_fifo(const struct device *dev,
 			 const struct mspi_xfer_packet *packet)
 {
 	struct mspi_dw_data *dev_data = dev->data;
@@ -222,9 +228,8 @@ static void read_rx_fifo(const struct device *dev,
 			}
 
 			if (buf_pos >= buf_end) {
-				dev_data->bytes_to_discard = bytes_to_discard;
 				dev_data->buf_pos = buf_pos;
-				return;
+				return true;
 			}
 		}
 
@@ -241,6 +246,7 @@ static void read_rx_fifo(const struct device *dev,
 
 	dev_data->bytes_to_discard = bytes_to_discard;
 	dev_data->buf_pos = buf_pos;
+	return false;
 }
 
 static void mspi_dw_isr(const struct device *dev)
@@ -248,32 +254,54 @@ static void mspi_dw_isr(const struct device *dev)
 	struct mspi_dw_data *dev_data = dev->data;
 	const struct mspi_xfer_packet *packet =
 		&dev_data->xfer.packets[dev_data->packets_done];
-	uint32_t int_status = read_isr(dev);
+	bool finished = false;
 
-	if (int_status & ISR_RXFIS_BIT) {
-		read_rx_fifo(dev, packet);
-	}
+	if (packet->dir == MSPI_TX) {
+		if (dev_data->buf_pos < dev_data->buf_end) {
+			tx_data(dev, packet);
+		} else {
+			/* It may happen that at this point the controller is
+			 * still shifting out the last frame (the last interrupt
+			 * occurs when the TX FIFO is empty). Wait if it signals
+			 * that it is busy.
+			 */
+			while (read_sr(dev) & SR_BUSY_BIT) {
+			}
 
-	if (dev_data->buf_pos >= dev_data->buf_end) {
-		write_imr(dev, 0);
-		/* It may happen that at this point the controller is still
-		 * shifting out the last frame (the last interrupt occurs when
-		 * the TX FIFO is empty). Wait if it signals that it is busy.
-		 */
-		while (read_sr(dev) & SR_BUSY_BIT) {
+			finished = true;
 		}
-
-		k_sem_give(&dev_data->finished);
 	} else {
-		if (int_status & ISR_TXEIS_BIT) {
-			if (dev_data->dummy_bytes) {
-				if (make_rx_cycles(dev)) {
+		uint32_t int_status = read_isr(dev);
+
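+		/* Keep handling RX and TX events until no interrupt condition
+		 * remains pending, so that an event that arrives while an
+		 * earlier one is being handled is not lost.
+		 */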
+		do {
+			if (int_status & ISR_RXFIS_BIT) {
+				if (read_rx_fifo(dev, packet)) {
+					finished = true;
+					break;
+				}
+
+				if (read_risr(dev) & RISR_RXOIR_BIT) {
+					finished = true;
+					break;
+				}
+
+				int_status = read_isr(dev);
+			}
+
+			if (int_status & ISR_TXEIS_BIT) {
+				if (tx_dummy_bytes(dev)) {
 					write_imr(dev, IMR_RXFIM_BIT);
 				}
-			} else {
-				tx_data(dev, packet);
+
+				int_status = read_isr(dev);
 			}
-		}
+		} while (int_status);
+	}
+
+	if (finished) {
+		write_imr(dev, 0);
+
+		k_sem_give(&dev_data->finished);
 	}
 
 	vendor_specific_irq_clear(dev);
@@ -372,7 +400,7 @@ static bool apply_cmd_length(struct mspi_dw_data *dev_data, uint32_t cmd_length)
 			      SPI_CTRLR0_INST_L16);
 		break;
 	default:
-		LOG_ERR("Command length %d not supported", cmd_length);
+		LOG_ERR("Command length %u not supported", cmd_length);
 		return false;
 	}
 
@@ -382,6 +410,11 @@ static bool apply_cmd_length(struct mspi_dw_data *dev_data, uint32_t cmd_length)
 
 static bool apply_addr_length(struct mspi_dw_data *dev_data, uint32_t addr_length)
 {
+	if (addr_length > 4) {
+		LOG_ERR("Address length %u not supported", addr_length);
+		return false;
+	}
+
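+	/* The ADDR_L field is expressed in 4-bit increments, hence the
+	 * address length in bytes is multiplied by 2 here.
+	 */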
 	dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_ADDR_L_MASK,
 					   addr_length * 2);
 
@@ -493,7 +526,7 @@ static bool apply_xip_cmd_length(const struct mspi_dw_data *dev_data,
 			      XIP_WRITE_CTRL_INST_L16);
 		break;
 	default:
-		LOG_ERR("Command length %d not supported", cmd_length);
+		LOG_ERR("Command length %u not supported", cmd_length);
 		return false;
 	}
 
@@ -505,6 +538,11 @@ static bool apply_xip_addr_length(const struct mspi_dw_data *dev_data,
 {
 	uint8_t addr_length = dev_data->xip_params_active.addr_length;
 
+	if (addr_length > 4) {
+		LOG_ERR("Address length %u not supported", addr_length);
+		return false;
+	}
+
 	ctrl->read |= FIELD_PREP(XIP_CTRL_ADDR_L_MASK, addr_length * 2);
 	ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_ADDR_L_MASK, addr_length * 2);
 
@@ -738,7 +776,6 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 			(dev_data->xip_enabled != 0), (false));
 	unsigned int key;
-	uint8_t tx_fifo_threshold;
 	uint32_t packet_frames;
 	uint32_t imr;
 	int rc = 0;
@@ -750,6 +787,7 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 	}
 
 	dev_data->dummy_bytes = 0;
+	dev_data->bytes_to_discard = 0;
 
 	dev_data->ctrlr0 &= ~CTRLR0_TMOD_MASK
 			  & ~CTRLR0_DFS_MASK;
@@ -790,7 +828,6 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 					   dev_data->xfer.tx_dummy);
 
 		write_rxftlr(dev, 0);
-		tx_fifo_threshold = dev_config->tx_fifo_threshold;
 	} else {
 		uint32_t tmod;
 		uint8_t rx_fifo_threshold;
@@ -807,31 +844,34 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 		    (dev_data->xfer.cmd_length != 0 ||
 		     dev_data->xfer.addr_length != 0)) {
 			uint32_t rx_total_bytes;
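+			/* Standard SPI offers no wait-cycle support, so RX
+			 * dummy cycles are realized as whole dummy bytes
+			 * (8 cycles each) clocked out before the data phase;
+			 * hence they must be a multiple of 8.
+			 */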
+			uint32_t dummy_cycles = dev_data->xfer.rx_dummy;
 
 			dev_data->bytes_to_discard = dev_data->xfer.cmd_length
-						   + dev_data->xfer.addr_length;
+						   + dev_data->xfer.addr_length
+						   + dummy_cycles / 8;
 			rx_total_bytes = dev_data->bytes_to_discard
 				       + packet->num_bytes;
 
-			dev_data->dummy_bytes = packet->num_bytes;
+			dev_data->dummy_bytes = dummy_cycles / 8
+					      + packet->num_bytes;
 
 			imr = IMR_TXEIM_BIT | IMR_RXFIM_BIT;
 			tmod = CTRLR0_TMOD_TX_RX;
-			tx_fifo_threshold = dev_config->tx_fifo_threshold;
 			/* For standard SPI, only 1-byte frames are used.
			 */
 			rx_fifo_threshold = MIN(rx_total_bytes - 1,
 						dev_config->rx_fifo_threshold);
 		} else {
 			imr = IMR_RXFIM_BIT;
 			tmod = CTRLR0_TMOD_RX;
-			tx_fifo_threshold = 0;
 			rx_fifo_threshold = MIN(packet_frames - 1,
 						dev_config->rx_fifo_threshold);
+
+			dev_data->spi_ctrlr0 |=
+				FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
+					   dev_data->xfer.rx_dummy);
 		}
 
 		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, tmod);
-		dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
-						   dev_data->xfer.rx_dummy);
 
 		write_rxftlr(dev, FIELD_PREP(RXFTLR_RFT_MASK,
 					     rx_fifo_threshold));
@@ -870,23 +910,49 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 	dev_data->buf_pos = packet->data_buf;
 	dev_data->buf_end = &packet->data_buf[packet->num_bytes];
 
-	if ((imr & IMR_TXEIM_BIT) && dev_data->buf_pos < dev_data->buf_end) {
-		uint32_t start_level = tx_fifo_threshold;
+	/* Set the TX FIFO threshold and its transmit start level. */
+	if (packet->num_bytes) {
+		/* If there is some data to send/receive, set the threshold to
+		 * the value configured for the driver instance and the start
+		 * level to the maximum possible value (it will be updated later
+		 * in tx_data() or tx_dummy_bytes() when TX is to be finished).
+		 * This helps avoid a situation when the TX FIFO becomes empty
+		 * before the transfer is complete and the SSI core finishes the
+		 * transaction and deactivates the CE line. This could occur
+		 * right before the data phase in enhanced SPI modes, when the
+		 * clock stretching feature does not work yet, or in Standard
+		 * SPI mode, where the clock stretching is not available at all.
+		 */
+		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
+					     dev_config->tx_fifo_depth_minus_1) |
+				  FIELD_PREP(TXFTLR_TFT_MASK,
+					     dev_config->tx_fifo_threshold));
+	} else {
+		uint32_t total_tx_entries = 0;
 
-		if (dev_data->dummy_bytes) {
-			uint32_t tx_total = dev_data->bytes_to_discard
-					  + dev_data->dummy_bytes;
+		/* If the whole transfer is to contain only the command and/or
+		 * address, set up the transfer to start right after entries
+		 * for those appear in the TX FIFO, and the threshold to 0,
+		 * so that the interrupt occurs when the TX FIFO gets emptied.
+		 */
+		if (dev_data->xfer.cmd_length) {
+			if (dev_data->standard_spi) {
+				total_tx_entries += dev_data->xfer.cmd_length;
+			} else {
+				total_tx_entries += 1;
+			}
+		}
 
-			if (start_level > tx_total - 1) {
-				start_level = tx_total - 1;
+		if (dev_data->xfer.addr_length) {
+			if (dev_data->standard_spi) {
+				total_tx_entries += dev_data->xfer.addr_length;
+			} else {
+				total_tx_entries += 1;
 			}
 		}
 
-		write_txftlr(dev,
-			FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
-			FIELD_PREP(TXFTLR_TFT_MASK, tx_fifo_threshold));
-	} else {
-		write_txftlr(dev, 0);
+		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
+					     total_tx_entries - 1));
 	}
 
 	/* Ensure that there will be no interrupt from the controller yet. */
@@ -894,6 +960,10 @@
 	/* Enable the controller. This must be done before DR is written. */
 	write_ssienr(dev, SSIENR_SSIC_EN_BIT);
 
+	/* Since the FIFO depth in SSI is always at least 8, it can be safely
+	 * assumed that the command and address fields (max. 2 and 4 bytes,
+	 * respectively) can be written here before the TX FIFO gets filled up.
+	 */
 	if (dev_data->standard_spi) {
 		if (dev_data->xfer.cmd_length) {
 			tx_control_field(dev, packet->cmd,
@@ -914,19 +984,15 @@
 		}
 	}
 
-	if (dev_data->dummy_bytes) {
-		if (make_rx_cycles(dev)) {
-			imr = IMR_RXFIM_BIT;
-		}
-	} else if (packet->dir == MSPI_TX && packet->num_bytes) {
-		tx_data(dev, packet);
-	}
-
 	/* Enable interrupts now and wait until the packet is done. */
 	write_imr(dev, imr);
 
 	rc = k_sem_take(&dev_data->finished, timeout);
-	if (rc < 0) {
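+	/* Check the raw status register here: the RX overflow interrupt is
+	 * never unmasked, so it is not reflected in ISR.
+	 */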
+	if (read_risr(dev) & RISR_RXOIR_BIT) {
+		LOG_ERR("RX FIFO overflow occurred");
+		rc = -EIO;
+	} else if (rc < 0) {
+		LOG_ERR("Transfer timed out");
 		rc = -ETIMEDOUT;
 	}
 
@@ -970,8 +1036,7 @@ static int _api_transceive(const struct device *dev,
 	struct mspi_dw_data *dev_data = dev->data;
 	int rc;
 
-	dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_WAIT_CYCLES_MASK
-			      & ~SPI_CTRLR0_INST_L_MASK
+	dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_INST_L_MASK
 			      & ~SPI_CTRLR0_ADDR_L_MASK;
 
 	if (!apply_cmd_length(dev_data, req->cmd_length) ||
@@ -979,10 +1044,15 @@ static int _api_transceive(const struct device *dev,
 		return -EINVAL;
 	}
 
-	if (dev_data->standard_spi &&
-	    (req->rx_dummy != 0 || req->tx_dummy != 0)) {
-		LOG_ERR("Dummy cycles unsupported in single line mode");
-		return -EINVAL;
+	if (dev_data->standard_spi) {
+		if (req->tx_dummy) {
+			LOG_ERR("TX dummy cycles unsupported in single line mode");
+			return -EINVAL;
+		}
+		if (req->rx_dummy % 8) {
+			LOG_ERR("Unsupported RX (%u) dummy cycles", req->rx_dummy);
+			return -EINVAL;
+		}
 	} else if (req->rx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX ||
 		   req->tx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX) {
 		LOG_ERR("Unsupported RX (%u) or TX (%u) dummy cycles",
@@ -1091,8 +1161,8 @@ static int _api_xip_config(const struct device *dev,
 		return -EINVAL;
 	}
 
-	if (params->rx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX ||
-	    params->tx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX) {
+	if (params->rx_dummy > XIP_CTRL_WAIT_CYCLES_MAX ||
+	    params->tx_dummy > XIP_WRITE_CTRL_WAIT_CYCLES_MAX) {
 		LOG_ERR("Unsupported RX (%u) or TX (%u) dummy cycles",
 			params->rx_dummy, params->tx_dummy);
 		return -EINVAL;
diff --git a/drivers/mspi/mspi_dw.h b/drivers/mspi/mspi_dw.h
index bd81d061e09..c35778cb1c5 100644
--- a/drivers/mspi/mspi_dw.h
+++ b/drivers/mspi/mspi_dw.h
@@ -66,6 +66,19 @@
 #define ISR_RXFIS_BIT			BIT(4)
 #define ISR_MSTIS_BIT			BIT(5)
 
+/* RISR - Raw Interrupt Status Register */
+#define RISR_TXEIR_BIT			BIT(0)
+#define RISR_TXOIR_BIT			BIT(1)
+#define RISR_RXUIR_BIT			BIT(2)
+#define RISR_RXOIR_BIT			BIT(3)
+#define RISR_RXFIR_BIT			BIT(4)
+#define RISR_MSTIR_BIT			BIT(5)
+#define RISR_XRXOIR_BIT			BIT(6)
+#define RISR_TXUIR_BIT			BIT(7)
+#define RISR_AXIER_BIT			BIT(8)
+#define RISR_SPITER_BIT			BIT(10)
+#define RISR_DONER_BIT			BIT(11)
+
 /* SPI_CTRLR0 - SPI Control Register */
 #define SPI_CTRLR0_CLK_STRETCH_EN_BIT	BIT(30)
 #define SPI_CTRLR0_XIP_PREFETCH_EN_BIT	BIT(29)
@@ -127,21 +140,6 @@
 #define XIP_CTRL_FRF_QUAD		2UL
 #define XIP_CTRL_FRF_OCTAL		3UL
 
-/* XIP_CTRL - XIP Control Register */
-#define XIP_CTRL_XIP_PREFETCH_EN_BIT	BIT(28)
-#define XIP_CTRL_XIP_MBL_MASK		GENMASK(27, 26)
-#define XIP_CTRL_XIP_MBL_2		0UL
-#define XIP_CTRL_XIP_MBL_4		1UL
-#define XIP_CTRL_XIP_MBL_8		2UL
-#define XIP_CTRL_XIP_MBL_16		3UL
-#define XIP_CTRL_XIP_HYBERBUS_EN_BIT	BIT(24)
-#define XIP_CTRL_CONT_XFER_EN_BIT	BIT(23)
-#define XIP_CTRL_INST_EN_BIT		BIT(22)
-#define XIP_CTRL_RXDS_EN_BIT		BIT(21)
-#define XIP_CTRL_INST_DDR_EN_BIT	BIT(20)
-#define XIP_CTRL_DDR_EN_BIT		BIT(19)
-#define XIP_CTRL_DFS_HC_BIT		BIT(18)
-
 /* XIP_WRITE_CTRL - XIP Write Control Register */
 #define XIP_WRITE_CTRL_WAIT_CYCLES_MASK	GENMASK(20, 16)
 #define XIP_WRITE_CTRL_WAIT_CYCLES_MAX	BIT_MASK(5)
diff --git a/drivers/mspi/mspi_dw_vendor_specific.h b/drivers/mspi/mspi_dw_vendor_specific.h
index 4913e536c92..e34d8a5db60 100644
--- a/drivers/mspi/mspi_dw_vendor_specific.h
+++ b/drivers/mspi/mspi_dw_vendor_specific.h
@@ -33,6 +33,17 @@ static inline void vendor_specific_resume(const struct device *dev)
 	ARG_UNUSED(dev);
 
 	NRF_EXMIF->TASKS_START = 1;
+
+	/* Try to write an SSI register and wait until the write is successful
+	 * to ensure that the clock that drives the SSI core is ready.
+	 */
+	uint32_t rxftlr = read_rxftlr(dev);
+	uint32_t rxftlr_mod = rxftlr ^ 1;
+
+	do {
+		write_rxftlr(dev, rxftlr_mod);
+		rxftlr = read_rxftlr(dev);
+	} while (rxftlr != rxftlr_mod);
 }
 
 static inline void vendor_specific_irq_clear(const struct device *dev)