diff --git a/doc/hardware/peripherals/mspi.rst b/doc/hardware/peripherals/mspi.rst
index 176c9bf5ede35..b9139f1ed1a88 100644
--- a/doc/hardware/peripherals/mspi.rst
+++ b/doc/hardware/peripherals/mspi.rst
@@ -100,10 +100,12 @@ whether to support scatter IO and callback management. The controller can determine
 which user callback to trigger based on :c:enum:`mspi_bus_event_cb_mask` upon
 completion of each async/sync transfer if the callback had been registered using
 :c:func:`mspi_register_callback`. Or not to trigger any callback at all with
-:c:enum:`MSPI_BUS_NO_CB` even if the callbacks are already registered.
-In which case that a controller supports hardware command queue, user could take full
-advantage of the hardware performance if scatter IO and callback management are supported
-by the driver implementation.
+:c:enum:`MSPI_BUS_NO_CB` even if the callbacks are already registered. If the driver
+supports it, the API provides :c:enum:`MSPI_BUS_XFER_COMPLETE_CB` to signal when a
+transfer finishes and :c:enum:`MSPI_BUS_TIMEOUT_CB` to signal when a transfer or request
+has timed out. If a controller supports a hardware command queue, the user can take full
+advantage of the hardware performance, provided that scatter IO and callback management
+are supported by the driver implementation.
 
 Device Tree
 ===========
@@ -192,6 +194,7 @@ Related configuration options:
 * :kconfig:option:`CONFIG_MSPI_TIMING`
 * :kconfig:option:`CONFIG_MSPI_INIT_PRIORITY`
 * :kconfig:option:`CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE`
+* :kconfig:option:`CONFIG_MSPI_DMA`
 
 API Reference
 *************
diff --git a/drivers/mspi/Kconfig b/drivers/mspi/Kconfig
index 269d8d16f04ac..a6442c1f70fea 100644
--- a/drivers/mspi/Kconfig
+++ b/drivers/mspi/Kconfig
@@ -55,6 +55,12 @@ config MSPI_TIMING
 	  Enables mspi_timing_config calls in device drivers for those
 	  controllers that need this to proper function at high frequencies.
 
+config MSPI_DMA
+	bool "DMA support"
+	help
+	  Enables DMA transfer support in MSPI drivers, where supported by the
+	  driver implementation and the underlying hardware.
+
 module = MSPI
 module-str = mspi
 source "subsys/logging/Kconfig.template.log_config"
diff --git a/drivers/mspi/Kconfig.dw b/drivers/mspi/Kconfig.dw
index cff148c138c6a..71302a801fdbd 100644
--- a/drivers/mspi/Kconfig.dw
+++ b/drivers/mspi/Kconfig.dw
@@ -7,13 +7,13 @@ config MSPI_DW
 	default y
 	depends on DT_HAS_SNPS_DESIGNWARE_SSI_ENABLED
 	select PINCTRL if $(dt_compat_any_has_prop,$(DT_COMPAT_SNPS_DESIGNWARE_SSI),pinctrl-0)
-	imply MSPI_XIP
 	imply MSPI_TIMING
 
 if MSPI_DW
 
 config MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE
 	bool "Handle FIFO in system workqueue"
+	depends on MULTITHREADING
 	help
 	  When the driver does not use DMA for transferring data to/from the
 	  SSI FIFOs, handling of those may take a significant amount of time.
diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c
index 518b5605785f3..caeaf158f7931 100644
--- a/drivers/mspi/mspi_dw.c
+++ b/drivers/mspi/mspi_dw.c
@@ -66,26 +66,36 @@ struct mspi_dw_data {
 	bool suspended;
 
 #if defined(CONFIG_MULTITHREADING)
+	const struct device *dev;
+
 	struct k_sem finished;
 	/* For synchronization of API calls made from different contexts. */
 	struct k_sem ctx_lock;
 	/* For locking of controller configuration. */
 	struct k_sem cfg_lock;
+
+	struct k_timer async_timer;
+	struct k_work async_timeout_work;
+	struct k_work async_packet_work;
+
+	mspi_callback_handler_t cbs[MSPI_BUS_EVENT_MAX];
+	struct mspi_callback_context *cb_ctxs[MSPI_BUS_EVENT_MAX];
 #else
 	volatile bool finished;
 	bool cfg_lock;
 #endif
+	struct mspi_xfer xfer;
 
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
 	struct k_work fifo_work;
-	const struct device *dev;
 	uint32_t imr;
 #endif
 };
 
 struct mspi_dw_config {
 	DEVICE_MMIO_ROM;
+	void *wrapper_regs;
 	void (*irq_config)(void);
 	uint32_t clock_frequency;
 #if defined(CONFIG_PINCTRL)
@@ -103,8 +113,14 @@ struct mspi_dw_config {
 	uint8_t max_queued_dummy_bytes;
 	uint8_t tx_fifo_threshold;
 	uint8_t rx_fifo_threshold;
+#ifdef CONFIG_MSPI_DMA
+	uint8_t dma_tx_data_level;
+	uint8_t dma_rx_data_level;
+#endif
+	void *vendor_specific_data;
 	DECLARE_REG_ACCESS();
 	bool sw_multi_periph;
+	enum mspi_op_mode op_mode;
 };
 
 /* Register access helpers. */
@@ -129,6 +145,11 @@ DEFINE_MM_REG_RD_WR(dr, 0x60)
 DEFINE_MM_REG_WR(rx_sample_dly, 0xf0)
 DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4)
 DEFINE_MM_REG_WR(txd_drive_edge, 0xf8)
+#if defined(CONFIG_MSPI_DMA)
+DEFINE_MM_REG_WR(dmacr, 0x4c)
+DEFINE_MM_REG_WR(dmatdlr, 0x50)
+DEFINE_MM_REG_WR(dmardlr, 0x54)
+#endif
 
 #if defined(CONFIG_MSPI_XIP)
 DEFINE_MM_REG_WR(xip_incr_inst, 0x100)
@@ -141,6 +162,100 @@ DEFINE_MM_REG_WR(xip_write_ctrl, 0x148)
 
 #include "mspi_dw_vendor_specific.h"
 
+static int start_next_packet(const struct device *dev);
+static int finalize_packet(const struct device *dev, int rc);
+static int finalize_transceive(const struct device *dev, int rc);
+
+#if defined(CONFIG_MULTITHREADING)
+/* Common function to set up the callback context and call the user callback. */
+static void call_user_callback_with_context(const struct device *dev,
+					    enum mspi_bus_event evt_type,
+					    uint32_t packet_idx,
+					    int status)
+{
+	LOG_DBG("Calling user function with evt_type: %u", evt_type);
+	struct mspi_dw_data *dev_data = dev->data;
+	const struct mspi_xfer_packet *packet =
+		&dev_data->xfer.packets[packet_idx];
+	struct mspi_callback_context *cb_ctx = dev_data->cb_ctxs[evt_type];
+
+	if (!(packet->cb_mask & BIT(evt_type)) ||
+	    !dev_data->cbs[evt_type]) {
+		LOG_DBG("No callback registered for this event type");
+		return;
+	}
+
+	cb_ctx->mspi_evt.evt_type = evt_type;
+	cb_ctx->mspi_evt.evt_data.controller = dev;
+	cb_ctx->mspi_evt.evt_data.dev_id = dev_data->dev_id;
+	cb_ctx->mspi_evt.evt_data.packet = packet;
+	cb_ctx->mspi_evt.evt_data.packet_idx = packet_idx;
+	cb_ctx->mspi_evt.evt_data.status = status;
+
+	dev_data->cbs[evt_type](cb_ctx);
+}
+
+static void async_timeout_timer_handler(struct k_timer *timer)
+{
+	struct mspi_dw_data *dev_data =
+		CONTAINER_OF(timer, struct mspi_dw_data, async_timer);
+
+	/* Submit work to handle the timeout in the proper context. */
+	k_work_submit(&dev_data->async_timeout_work);
+}
+
+static void async_timeout_work_handler(struct k_work *work)
+{
+	struct mspi_dw_data *dev_data =
+		CONTAINER_OF(work, struct mspi_dw_data, async_timeout_work);
+	const struct device *dev = dev_data->dev;
+	int rc;
+
+	LOG_ERR("Async transfer timed out");
+
+	rc = finalize_packet(dev, -ETIMEDOUT);
+	rc = finalize_transceive(dev, rc);
+
+	/* Call the user callback with the timeout error (outside of any locks). */
+	call_user_callback_with_context(dev, MSPI_BUS_TIMEOUT,
+					dev_data->packets_done, rc);
+}
+
+static void async_packet_work_handler(struct k_work *work)
+{
+	struct mspi_dw_data *dev_data =
+		CONTAINER_OF(work, struct mspi_dw_data, async_packet_work);
+	const struct device *dev = dev_data->dev;
+	uint32_t packet_idx = dev_data->packets_done;
+	int rc;
+
+	LOG_DBG("Processing async work in thread context");
+
+	rc = finalize_packet(dev, 0);
+	if (rc >= 0) {
+		++dev_data->packets_done;
+		if (dev_data->packets_done < dev_data->xfer.num_packet) {
+			LOG_DBG("Starting next packet (%u/%u)",
+				dev_data->packets_done + 1,
+				dev_data->xfer.num_packet);
+
+			rc = start_next_packet(dev);
+			if (rc >= 0) {
+				return;
+			}
+
+			++packet_idx;
+		}
+	}
+
+	rc = finalize_transceive(dev, rc);
+	call_user_callback_with_context(dev,
+					rc < 0 ? MSPI_BUS_ERROR
+					       : MSPI_BUS_XFER_COMPLETE,
+					packet_idx, rc);
+}
+#endif /* defined(CONFIG_MULTITHREADING) */
+
 static void tx_data(const struct device *dev,
 		    const struct mspi_xfer_packet *packet)
 {
@@ -316,6 +431,21 @@ static inline void set_imr(const struct device *dev, uint32_t imr)
 #endif
 }
 
+static void handle_end_of_packet(struct mspi_dw_data *dev_data)
+{
+#if defined(CONFIG_MULTITHREADING)
+	if (dev_data->xfer.async) {
+		k_timer_stop(&dev_data->async_timer);
+
+		k_work_submit(&dev_data->async_packet_work);
+	} else {
+		k_sem_give(&dev_data->finished);
+	}
+#else
+	dev_data->finished = true;
+#endif
+}
+
 static void handle_fifos(const struct device *dev)
 {
 	struct mspi_dw_data *dev_data = dev->data;
@@ -401,11 +532,8 @@ static void handle_fifos(const struct device *dev)
 	if (finished) {
 		set_imr(dev, 0);
-#if defined(CONFIG_MULTITHREADING)
-		k_sem_give(&dev_data->finished);
-#else
-		dev_data->finished = true;
-#endif
+		handle_end_of_packet(dev_data);
+
 	}
 }
 
@@ -424,6 +552,19 @@ static void fifo_work_handler(struct k_work *work)
 
 static void mspi_dw_isr(const struct device *dev)
 {
+#if defined(CONFIG_MSPI_DMA)
+	struct mspi_dw_data *dev_data = dev->data;
+
+	if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		if (vendor_specific_read_dma_irq(dev)) {
+			set_imr(dev, 0);
+			handle_end_of_packet(dev_data);
+		}
+		vendor_specific_irq_clear(dev);
+		return;
+	}
+#endif
+
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
 	struct mspi_dw_data *dev_data = dev->data;
 	int rc;
@@ -874,6 +1015,10 @@ static int api_dev_config(const struct device *dev,
 		}
 
 		dev_data->dev_id = dev_id;
+
+#if defined(CONFIG_MULTITHREADING)
+		memset(dev_data->cbs, 0, sizeof(dev_data->cbs));
+#endif
 	}
 
 	if (param_mask == MSPI_DEVICE_CONFIG_NONE &&
@@ -935,7 +1080,7 @@ static void tx_control_field(const struct device *dev,
 	} while (shift);
 }
 
-static int start_next_packet(const struct device *dev, k_timeout_t timeout)
+static int start_next_packet(const struct device *dev)
 {
 	const struct mspi_dw_config *dev_config = dev->config;
 	struct mspi_dw_data *dev_data = dev->data;
@@ -946,7 +1091,7 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 				    (false));
 	unsigned int key;
 	uint32_t packet_frames;
-	uint32_t imr;
+	uint32_t imr = 0;
 	int rc = 0;
 
 	if (packet->num_bytes == 0 &&
@@ -994,6 +1139,18 @@
 		return -EINVAL;
 	}
 
+#if defined(CONFIG_MSPI_DMA)
+	if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		/* Check if the packet buffer is accessible. */
+		if (packet->num_bytes > 0 &&
+		    !vendor_specific_dma_accessible_check(dev, packet->data_buf)) {
+			LOG_ERR("Buffer not DMA accessible: ptr=0x%lx, size=%u",
+				(uintptr_t)packet->data_buf, packet->num_bytes);
+			return -EINVAL;
+		}
+	}
+#endif
+
 	if (packet->dir == MSPI_TX || packet->num_bytes == 0) {
 		imr = IMR_TXEIM_BIT;
 		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK,
@@ -1002,6 +1159,12 @@
 					       dev_data->xfer.tx_dummy);
 		write_rxftlr(dev, 0);
+#if defined(CONFIG_MSPI_DMA)
+	} else if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, CTRLR0_TMOD_RX);
+		dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
+						   dev_data->xfer.rx_dummy);
+#endif
 	} else {
 		uint32_t tmod;
 		uint8_t rx_fifo_threshold;
@@ -1090,100 +1253,139 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 		irq_unlock(key);
 	}
 
-	dev_data->buf_pos = packet->data_buf;
-	dev_data->buf_end = &packet->data_buf[packet->num_bytes];
-
-	/* Set the TX FIFO threshold and its transmit start level. */
-	if (packet->num_bytes) {
-		/* If there is some data to send/receive, set the threshold to
-		 * the value configured for the driver instance and the start
-		 * level to the maximum possible value (it will be updated later
-		 * in tx_fifo() or tx_dummy_bytes() when TX is to be finished).
-		 * This helps avoid a situation when the TX FIFO becomes empty
-		 * before the transfer is complete and the SSI core finishes the
-		 * transaction and deactivates the CE line. This could occur
-		 * right before the data phase in enhanced SPI modes, when the
-		 * clock stretching feature does not work yet, or in Standard
-		 * SPI mode, where the clock stretching is not available at all.
-		 */
-		uint8_t start_level = dev_data->dummy_bytes != 0
-				    ? dev_config->max_queued_dummy_bytes - 1
-				    : dev_config->tx_fifo_depth_minus_1;
+	if (dev_data->xfer.xfer_mode == MSPI_PIO) {
+		dev_data->buf_pos = packet->data_buf;
+		dev_data->buf_end = &packet->data_buf[packet->num_bytes];
+		/* Set the TX FIFO threshold and its transmit start level. */
+		if (packet->num_bytes) {
+			/* If there is some data to send/receive, set the threshold to
+			 * the value configured for the driver instance and the start
+			 * level to the maximum possible value (it will be updated later
+			 * in tx_fifo() or tx_dummy_bytes() when TX is to be finished).
+			 * This helps avoid a situation when the TX FIFO becomes empty
+			 * before the transfer is complete and the SSI core finishes the
+			 * transaction and deactivates the CE line. This could occur
+			 * right before the data phase in enhanced SPI modes, when the
+			 * clock stretching feature does not work yet, or in Standard
+			 * SPI mode, where the clock stretching is not available at all.
+			 */
+			uint8_t start_level = dev_data->dummy_bytes != 0
+					    ? dev_config->max_queued_dummy_bytes - 1
+					    : dev_config->tx_fifo_depth_minus_1;
 
-		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
-				  FIELD_PREP(TXFTLR_TFT_MASK,
-					     dev_config->tx_fifo_threshold));
-	} else {
-		uint32_t total_tx_entries = 0;
+			write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
+					  FIELD_PREP(TXFTLR_TFT_MASK,
+						     dev_config->tx_fifo_threshold));
 
-		/* It the whole transfer is to contain only the command and/or
-		 * address, set up the transfer to start right after entries
-		 * for those appear in the TX FIFO, and the threshold to 0,
-		 * so that the interrupt occurs when the TX FIFO gets emptied.
-		 */
-		if (dev_data->xfer.cmd_length) {
-			if (dev_data->standard_spi) {
-				total_tx_entries += dev_data->xfer.cmd_length;
-			} else {
-				total_tx_entries += 1;
+		} else {
+			uint32_t total_tx_entries = 0;
+
+			/* If the whole transfer is to contain only the command and/or
+			 * address, set up the transfer to start right after entries
+			 * for those appear in the TX FIFO, and the threshold to 0,
+			 * so that the interrupt occurs when the TX FIFO gets emptied.
+			 */
+			if (dev_data->xfer.cmd_length) {
+				if (dev_data->standard_spi) {
+					total_tx_entries += dev_data->xfer.cmd_length;
+				} else {
+					total_tx_entries += 1;
+				}
 			}
-		}
 
-		if (dev_data->xfer.addr_length) {
-			if (dev_data->standard_spi) {
-				total_tx_entries += dev_data->xfer.addr_length;
-			} else {
-				total_tx_entries += 1;
+			if (dev_data->xfer.addr_length) {
+				if (dev_data->standard_spi) {
+					total_tx_entries += dev_data->xfer.addr_length;
+				} else {
+					total_tx_entries += 1;
+				}
 			}
+
+			write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
+						     total_tx_entries - 1));
 		}
 
-		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
-					     total_tx_entries - 1));
-	}
+		/* Ensure that there will be no interrupt from the controller yet. */
+		write_imr(dev, 0);
+		/* Enable the controller. This must be done before DR is written. */
+		write_ssienr(dev, SSIENR_SSIC_EN_BIT);
 
-	/* Ensure that there will be no interrupt from the controller yet. */
-	write_imr(dev, 0);
-	/* Enable the controller. This must be done before DR is written. */
-	write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+		/* Since the FIFO depth in SSI is always at least 8, it can be safely
+		 * assumed that the command and address fields (max. 2 and 4 bytes,
+		 * respectively) can be written here before the TX FIFO gets filled up.
+		 */
+		if (dev_data->standard_spi) {
+			if (dev_data->xfer.cmd_length) {
+				tx_control_field(dev, packet->cmd,
+						 dev_data->xfer.cmd_length);
+			}
 
-	/* Since the FIFO depth in SSI is always at least 8, it can be safely
-	 * assumed that the command and address fields (max. 2 and 4 bytes,
-	 * respectively) can be written here before the TX FIFO gets filled up.
-	 */
-	if (dev_data->standard_spi) {
-		if (dev_data->xfer.cmd_length) {
-			tx_control_field(dev, packet->cmd,
-					 dev_data->xfer.cmd_length);
-		}
+			if (dev_data->xfer.addr_length) {
+				tx_control_field(dev, packet->address,
+						 dev_data->xfer.addr_length);
+			}
+		} else {
+			if (dev_data->xfer.cmd_length) {
+				write_dr(dev, packet->cmd);
+			}
 
-		if (dev_data->xfer.addr_length) {
-			tx_control_field(dev, packet->address,
-					 dev_data->xfer.addr_length);
-		}
-	} else {
-		if (dev_data->xfer.cmd_length) {
-			write_dr(dev, packet->cmd);
+			if (dev_data->xfer.addr_length) {
+				write_dr(dev, packet->address);
+			}
 		}
 
-		if (dev_data->xfer.addr_length) {
-			write_dr(dev, packet->address);
+		/* Prefill TX FIFO with any data we can */
+		if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) {
+			imr = IMR_RXFIM_BIT;
+		} else if (packet->dir == MSPI_TX && packet->num_bytes) {
+			tx_data(dev, packet);
 		}
-	}
 
-	/* Prefill TX FIFO with any data we can */
-	if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) {
-		imr = IMR_RXFIM_BIT;
-	} else if (packet->dir == MSPI_TX && packet->num_bytes) {
-		tx_data(dev, packet);
+		/* Enable interrupts now and wait until the packet is done unless async. */
+		write_imr(dev, imr);
+	}
+#if defined(CONFIG_MSPI_DMA)
+	else {
+		/* For DMA mode, set start level based on transfer length to prevent underflow */
+		uint32_t total_transfer_bytes = packet->num_bytes + dev_data->xfer.addr_length +
+						dev_data->xfer.cmd_length;
+		uint32_t transfer_frames = total_transfer_bytes >> dev_data->bytes_per_frame_exp;
 
-	/* Enable interrupts now and wait until the packet is done. */
-	write_imr(dev, imr);
+		/* Use minimum of transfer length or FIFO depth, but at least 1 */
+		uint8_t dma_start_level = MIN(transfer_frames - 1,
+					      dev_config->tx_fifo_depth_minus_1);
+
+		dma_start_level = MAX(dma_start_level, 1);
+
+		/* Only TXFTHR needs to be set to the minimum number of frames. */
+		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, dma_start_level));
+		write_dmatdlr(dev, FIELD_PREP(DMATDLR_DMATDL_MASK, dev_config->dma_tx_data_level));
+		write_dmardlr(dev, FIELD_PREP(DMARDLR_DMARDL_MASK, dev_config->dma_rx_data_level));
+		write_dmacr(dev, DMACR_TDMAE_BIT | DMACR_RDMAE_BIT);
+		write_imr(dev, 0);
+		write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+
+		vendor_specific_start_dma_xfer(dev);
+	}
+#endif
 
 	/* Write SER to start transfer */
 	write_ser(dev, BIT(dev_data->dev_id->dev_idx));
 
 #if defined(CONFIG_MULTITHREADING)
+	k_timeout_t timeout = K_MSEC(dev_data->xfer.timeout);
+
+	/* For an async transfer, start the timeout timer and exit. */
+	if (dev_data->xfer.async) {
+		k_timer_start(&dev_data->async_timer, timeout, K_NO_WAIT);
+
+		return 0;
+	}
+
+	/* For a sync transfer, wait until the packet is finished. */
 	rc = k_sem_take(&dev_data->finished, timeout);
+	if (rc < 0) {
+		rc = -ETIMEDOUT;
+	}
 #else
 	if (!WAIT_FOR(dev_data->finished,
 		      dev_data->xfer.timeout * USEC_PER_MSEC,
@@ -1193,12 +1397,22 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 
 	dev_data->finished = false;
 #endif
+
+	return finalize_packet(dev, rc);
+}
+
+static int finalize_packet(const struct device *dev, int rc)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	bool xip_enabled = COND_CODE_1(CONFIG_MSPI_XIP,
+				       (dev_data->xip_enabled != 0),
+				       (false));
+
 	if (read_risr(dev) & RISR_RXOIR_BIT) {
 		LOG_ERR("RX FIFO overflow occurred");
 		rc = -EIO;
-	} else if (rc < 0) {
+	} else if (rc == -ETIMEDOUT) {
 		LOG_ERR("Transfer timed out");
-		rc = -ETIMEDOUT;
 	}
 
 	/* Disable the controller. This will immediately halt the transfer
@@ -1207,10 +1421,10 @@
 	if (xip_enabled) {
 		/* If XIP is enabled, the controller must be kept enabled,
 		 * so disable it only momentarily if there's a need to halt
-		 * a transfer that has timeout out.
+		 * a transfer that ended up with an error.
 		 */
-		if (rc == -ETIMEDOUT) {
-			key = irq_lock();
+		if (rc < 0) {
+			unsigned int key = irq_lock();
 
 			write_ssienr(dev, 0);
 			write_ssienr(dev, SSIENR_SSIC_EN_BIT);
@@ -1220,13 +1434,14 @@
 	} else {
 		write_ssienr(dev, 0);
 	}
+	/* Clear SER */
 	write_ser(dev, 0);
 
 	if (dev_data->dev_id->ce.port) {
 		int rc2;
 
-		/* Do not use `rc` to not overwrite potential timeout error. */
+		/* Use a separate variable so a packet error in `rc` is not overwritten. */
 		rc2 = gpio_pin_set_dt(&dev_data->dev_id->ce, 0);
 		if (rc2 < 0) {
 			LOG_ERR("Failed to deactivate CE line (%d)", rc2);
@@ -1269,10 +1484,18 @@ static int _api_transceive(const struct device *dev,
 
 	dev_data->xfer = *req;
 
+	/* For async transfers, only the first packet is started here; the
+	 * following ones, if any, are started from the system workqueue.
+	 */
+	if (req->async) {
+		dev_data->packets_done = 0;
+		return start_next_packet(dev);
+	}
+
 	for (dev_data->packets_done = 0;
 	     dev_data->packets_done < dev_data->xfer.num_packet;
 	     dev_data->packets_done++) {
-		rc = start_next_packet(dev, K_MSEC(dev_data->xfer.timeout));
+		rc = start_next_packet(dev);
 		if (rc < 0) {
 			return rc;
 		}
@@ -1286,16 +1509,15 @@ static int api_transceive(const struct device *dev,
 			  const struct mspi_xfer *req)
 {
 	struct mspi_dw_data *dev_data = dev->data;
-	int rc, rc2;
+	int rc;
 
 	if (dev_id != dev_data->dev_id) {
 		LOG_ERR("Controller is not configured for this device");
 		return -EINVAL;
 	}
 
-	/* TODO: add support for asynchronous transfers */
-	if (req->async) {
-		LOG_ERR("Asynchronous transfers are not supported");
+	if (req->async && !IS_ENABLED(CONFIG_MULTITHREADING)) {
+		LOG_ERR("Asynchronous transfers require multithreading");
 		return -ENOTSUP;
 	}
@@ -1315,7 +1537,20 @@ static int api_transceive(const struct device *dev,
 		rc = _api_transceive(dev, req);
 	}
 
+	if (req->async && rc >= 0) {
+		return rc;
+	}
+
+	return finalize_transceive(dev, rc);
+}
+
+static int finalize_transceive(const struct device *dev, int rc)
+{
+	int rc2;
+
 #if defined(CONFIG_MULTITHREADING)
+	struct mspi_dw_data *dev_data = dev->data;
+
 	k_sem_give(&dev_data->ctx_lock);
 #endif
@@ -1328,6 +1563,33 @@ static int api_transceive(const struct device *dev,
 	return rc;
 }
 
+#if defined(CONFIG_MULTITHREADING)
+static int api_register_callback(const struct device *dev,
+				 const struct mspi_dev_id *dev_id,
+				 const enum mspi_bus_event evt_type,
+				 mspi_callback_handler_t cb,
+				 struct mspi_callback_context *ctx)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+
+	if (dev_id != dev_data->dev_id) {
+		LOG_ERR("Controller is not configured for this device");
+		return -EINVAL;
+	}
+
+	if (evt_type != MSPI_BUS_ERROR &&
+	    evt_type != MSPI_BUS_XFER_COMPLETE &&
+	    evt_type != MSPI_BUS_TIMEOUT) {
+		LOG_ERR("Callback type %d not supported", evt_type);
+		return -ENOTSUP;
+	}
+
+	dev_data->cbs[evt_type] = cb;
+	dev_data->cb_ctxs[evt_type] = ctx;
+	return 0;
+}
+#endif /* defined(CONFIG_MULTITHREADING) */
+
 #if defined(CONFIG_MSPI_TIMING)
 static int api_timing_config(const struct device *dev,
 			     const struct mspi_dev_id *dev_id,
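To illustrate how an application consumes the `api_register_callback()` entry point added above, here is a minimal sketch against the generic MSPI API. The device pointer, `dev_id` and function names are hypothetical, and the handler signature follows `mspi_callback_handler_t` from `include/zephyr/drivers/mspi.h`:

```c
#include <zephyr/drivers/mspi.h>
#include <zephyr/sys/printk.h>

/* Context storage; the driver fills cb_ctx->mspi_evt before calling back. */
static struct mspi_callback_context timeout_ctx;

/* Hypothetical handler, matching the mspi_callback_handler_t prototype. */
static void timeout_handler(struct mspi_callback_context *cb_ctx, ...)
{
	printk("MSPI packet %u timed out (status %d)\n",
	       cb_ctx->mspi_evt.evt_data.packet_idx,
	       cb_ctx->mspi_evt.evt_data.status);
}

/* 'controller' and 'dev_id' must be the pair previously activated through
 * mspi_dev_config(); otherwise the driver rejects the call with -EINVAL.
 */
static int register_timeout_callback(const struct device *controller,
				     const struct mspi_dev_id *dev_id)
{
	return mspi_register_callback(controller, dev_id, MSPI_BUS_TIMEOUT,
				      timeout_handler, &timeout_ctx);
}
```

Since `api_dev_config()` clears the callback table whenever a new `dev_id` claims the controller, registration belongs after the device-configuration step.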
@@ -1557,6 +1819,7 @@ static int dev_pm_action_cb(const struct device *dev,
 
 static int dev_init(const struct device *dev)
 {
+	struct mspi_dw_data *dev_data = dev->data;
 	const struct mspi_dw_config *dev_config = dev->config;
 	const struct gpio_dt_spec *ce_gpio;
 	int rc;
@@ -1565,18 +1828,23 @@ static int dev_init(const struct device *dev)
 
 	vendor_specific_init(dev);
 
+	dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SSI_IS_MST_BIT,
+				       dev_config->op_mode == MSPI_OP_MODE_CONTROLLER);
+
 	dev_config->irq_config();
 
 #if defined(CONFIG_MULTITHREADING)
-	struct mspi_dw_data *dev_data = dev->data;
-
+	dev_data->dev = dev;
 	k_sem_init(&dev_data->finished, 0, 1);
 	k_sem_init(&dev_data->cfg_lock, 1, 1);
 	k_sem_init(&dev_data->ctx_lock, 1, 1);
+
+	k_timer_init(&dev_data->async_timer, async_timeout_timer_handler, NULL);
+	k_work_init(&dev_data->async_timeout_work, async_timeout_work_handler);
+	k_work_init(&dev_data->async_packet_work, async_packet_work_handler);
 #endif
 
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
-	dev_data->dev = dev;
 	k_work_init(&dev_data->fifo_work, fifo_work_handler);
 #endif
@@ -1616,6 +1884,9 @@ static DEVICE_API(mspi, drv_api) = {
 	.dev_config = api_dev_config,
 	.get_channel_status = api_get_channel_status,
 	.transceive = api_transceive,
+#if defined(CONFIG_MULTITHREADING)
+	.register_callback = api_register_callback,
+#endif
 #if defined(CONFIG_MSPI_TIMING)
 	.timing_config = api_timing_config,
 #endif
@@ -1655,6 +1926,11 @@
 #define TX_FIFO_DEPTH(inst) DT_INST_PROP(inst, fifo_depth)
 #define RX_FIFO_DEPTH(inst) DT_INST_PROP_OR(inst, rx_fifo_depth, \
 					    TX_FIFO_DEPTH(inst))
+#define MSPI_DW_DMA_DATA_LEVELS(inst) \
+	.dma_tx_data_level = \
+		DT_INST_PROP_OR(inst, dma_transmit_data_level, 0), \
+	.dma_rx_data_level = \
+		DT_INST_PROP_OR(inst, dma_receive_data_level, 0)
 #define MSPI_DW_FIFO_PROPS(inst) \
 	.tx_fifo_depth_minus_1 = TX_FIFO_DEPTH(inst) - 1, \
 	.max_queued_dummy_bytes = MIN(RX_FIFO_DEPTH(inst) - 1, \
@@ -1669,6 +1945,7 @@
 #define MSPI_DW_INST(inst)						\
 	PM_DEVICE_DT_INST_DEFINE(inst, dev_pm_action_cb);		\
 	IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);))	\
+	VENDOR_SPECIFIC_DATA_DEFINE(inst)				\
 	static void irq_config##inst(void)				\
 	{								\
 		LISTIFY(DT_INST_NUM_IRQS(inst),				\
@@ -1677,6 +1954,7 @@
 	static struct mspi_dw_data dev##inst##_data;			\
 	static const struct mspi_dw_config dev##inst##_config = {	\
 		MSPI_DW_MMIO_ROM_INIT(DT_DRV_INST(inst)),		\
+		.wrapper_regs = (void *)DT_INST_REG_ADDR(inst),		\
 		.irq_config = irq_config##inst,				\
 		.clock_frequency = MSPI_DW_CLOCK_FREQUENCY(inst),	\
 		IF_ENABLED(CONFIG_PINCTRL,				\
@@ -1684,9 +1962,12 @@
 		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, ce_gpios),	\
 			(MSPI_DW_CE_GPIOS(inst),))			\
 		MSPI_DW_FIFO_PROPS(inst),				\
+		IF_ENABLED(CONFIG_MSPI_DMA, (MSPI_DW_DMA_DATA_LEVELS(inst),)) \
+		.vendor_specific_data = VENDOR_SPECIFIC_DATA_GET(inst),	\
 		DEFINE_REG_ACCESS(inst)					\
 		.sw_multi_periph =					\
 			DT_INST_PROP(inst, software_multiperipheral),	\
+		.op_mode = DT_INST_STRING_TOKEN(inst, op_mode),		\
 	};								\
 	DEVICE_DT_INST_DEFINE(inst,					\
 		dev_init, PM_DEVICE_DT_INST_GET(inst),			\
diff --git a/drivers/mspi/mspi_dw.h b/drivers/mspi/mspi_dw.h
index 28e4bed016e7c..0d015ae3db036 100644
--- a/drivers/mspi/mspi_dw.h
+++ b/drivers/mspi/mspi_dw.h
@@ -19,6 +19,7 @@
  */
 
 /* CTRLR0 - Control Register 0 */
+#define CTRLR0_SSI_IS_MST_BIT		BIT(31)
 #define CTRLR0_SPI_FRF_MASK		COND_CODE_1(SSI_VERSION_2, GENMASK(22, 21), GENMASK(23, 22))
 #define CTRLR0_SPI_FRF_STANDARD		0UL
 #define CTRLR0_SPI_FRF_DUAL		1UL
@@ -172,6 +173,22 @@
 #define XIP_WRITE_CTRL_FRF_QUAD		2UL
 #define XIP_WRITE_CTRL_FRF_OCTAL	3UL
 
+/* DMACR - DMA Control Register */
+#define DMACR_ATW_MASK			GENMASK(4, 3)
+#define DMACR_ATW_1			0UL
+#define DMACR_ATW_2			1UL
+#define DMACR_ATW_4			2UL
+#define DMACR_ATW_8			3UL
+#define DMACR_IDMAE_BIT			BIT(2)
+#define DMACR_TDMAE_BIT			BIT(1)
+#define DMACR_RDMAE_BIT			BIT(0)
+
+/* DMATDLR - DMA Transmit Data Level */
+#define DMATDLR_DMATDL_MASK		GENMASK(3, 0)
+
+/* DMARDLR - DMA Receive Data Level */
+#define DMARDLR_DMARDL_MASK		GENMASK(3, 0)
+
 /* Register access helpers. */
 #define USES_AUX_REG(inst) + DT_INST_PROP(inst, aux_reg_enable)
 #define AUX_REG_INSTANCES (0 DT_INST_FOREACH_STATUS_OKAY(USES_AUX_REG))
diff --git a/drivers/mspi/mspi_dw_vendor_specific.h b/drivers/mspi/mspi_dw_vendor_specific.h
index d32a53ac29393..762ab1ed3f973 100644
--- a/drivers/mspi/mspi_dw_vendor_specific.h
+++ b/drivers/mspi/mspi_dw_vendor_specific.h
@@ -14,6 +14,10 @@
 
 #include
 
+/* Empty macros for the generic case - no vendor-specific data. */
+#define VENDOR_SPECIFIC_DATA_DEFINE(inst)
+#define VENDOR_SPECIFIC_DATA_GET(inst) NULL
+
 static inline void vendor_specific_init(const struct device *dev)
 {
 	ARG_UNUSED(dev);
@@ -97,7 +101,216 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
 }
 #endif /* defined(CONFIG_MSPI_XIP) */
 
-#else
+#elif DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_qspi_v2)
+#define MSPI_DT_DRV_COMPAT nordic_nrf_qspi_v2
+#include
+
+static inline void vendor_specific_init(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->EVENTS_CORE = 0;
+	preg->EVENTS_DMA.DONE = 0;
+
+	preg->INTENSET = BIT(QSPI_INTENSET_CORE_Pos)
+		       | BIT(QSPI_INTENSET_DMADONE_Pos);
+}
+
+static inline void vendor_specific_suspend(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->ENABLE = 0;
+}
+
+static inline void vendor_specific_resume(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->ENABLE = 1;
+}
+
+static inline void vendor_specific_irq_clear(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->EVENTS_CORE = 0;
+	preg->EVENTS_DMA.DONE = 0;
+}
+
+/* DMA support */
+
+#define EVDMA_ATTR_LEN_Pos	(0UL)
+#define EVDMA_ATTR_LEN_Msk	(0x00FFFFFFUL)
+
+#define EVDMA_ATTR_ATTR_Pos	(24UL)
+#define EVDMA_ATTR_ATTR_Msk	(0x3FUL << EVDMA_ATTR_ATTR_Pos)
+
+#define EVDMA_ATTR_32AXI_Pos	(30UL)
+#define EVDMA_ATTR_32AXI_Msk	(0x1UL << EVDMA_ATTR_32AXI_Pos)
+
+#define EVDMA_ATTR_EVENTS_Pos	(31UL)
+#define EVDMA_ATTR_EVENTS_Msk	(0x1UL << EVDMA_ATTR_EVENTS_Pos)
+
+typedef enum {
+	EVDMA_BYTE_SWAP = 0,
+	EVDMA_JOBLIST = 1,
+	EVDMA_BUFFER_FILL = 2,
+	EVDMA_FIXED_ATTR = 3,
+	EVDMA_STATIC_ADDR = 4,
+	EVDMA_PLAIN_DATA_BUF_WR = 5,
+} EVDMA_ATTR_Type;
+
+/* Set up the EVDMA attribute with the following configuration. */
+#define EVDMA_ATTRIBUTE (BIT(EVDMA_BYTE_SWAP) | BIT(EVDMA_JOBLIST) | \
+			 BIT(EVDMA_BUFFER_FILL) | BIT(EVDMA_FIXED_ATTR) | \
+			 BIT(EVDMA_STATIC_ADDR) | BIT(EVDMA_PLAIN_DATA_BUF_WR))
+
+typedef struct {
+	uint8_t *addr;
+	uint32_t attr;
+} EVDMA_JOB_Type;
+
+#define EVDMA_JOB(BUFFER, SIZE, ATTR) \
+	(EVDMA_JOB_Type) { .addr = (uint8_t *)BUFFER, .attr = (ATTR << EVDMA_ATTR_ATTR_Pos | SIZE) }
+#define EVDMA_NULL_JOB() \
+	(EVDMA_JOB_Type) { .addr = (uint8_t *)0, .attr = 0 }
+
+typedef struct {
+	EVDMA_JOB_Type *tx_job;
+	EVDMA_JOB_Type *rx_job;
+} QSPI_TRANSFER_LIST_Type;
+
+/* Number of jobs needed for a transmit transaction. */
+#define MAX_NUM_JOBS 4
+
+/* Vendor-specific data structure for Nordic QSPI */
+typedef struct {
+	QSPI_TRANSFER_LIST_Type *transfer_list;
+	EVDMA_JOB_Type *joblist;
+} nordic_qspi_vendor_data_t;
+
+/* Static allocation macros for vendor-specific data */
+#define VENDOR_SPECIFIC_DATA_DEFINE(inst) \
+	static QSPI_TRANSFER_LIST_Type mspi_dw_##inst##_transfer_list; \
+	static EVDMA_JOB_Type mspi_dw_##inst##_joblist[MAX_NUM_JOBS]; \
+	static nordic_qspi_vendor_data_t mspi_dw_##inst##_vendor_data = { \
+		.transfer_list = &mspi_dw_##inst##_transfer_list, \
+		.joblist = &mspi_dw_##inst##_joblist[0] \
+	};
+
+#define VENDOR_SPECIFIC_DATA_GET(inst) &mspi_dw_##inst##_vendor_data
+
+/* Temporarily hard-coded, as these are not in the MDK yet. */
+#define QSPI_TMOD_OFFSET (0x490UL)
+#define QSPI_TMOD_RX_ONLY (0x2)
+
+static inline void vendor_specific_start_dma_xfer(const struct device *dev)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	const struct mspi_dw_config *config = dev->config;
+	const struct mspi_xfer_packet *packet =
+		&dev_data->xfer.packets[dev_data->packets_done];
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	/* Use vendor-specific data from config - it stores the job and transfer lists. */
+	nordic_qspi_vendor_data_t *vendor_data = (nordic_qspi_vendor_data_t *)
+		config->vendor_specific_data;
+
+	QSPI_TRANSFER_LIST_Type *transfer_list = vendor_data->transfer_list;
+	EVDMA_JOB_Type *joblist = vendor_data->joblist;
+
+	int tmod = 0;
+	int job_idx = 0;
+
+	if (packet->dir == MSPI_TX) {
+		preg->CONFIG.RXTRANSFERLENGTH = 0;
+
+		/* Set up the EVDMA joblist depending on cmd, addr and data. */
+
+		/*
+		 * The command and address jobs always have a length of 4 from the
+		 * DMA's perspective; the QSPI peripheral uses the length of data
+		 * specified in the core registers.
+		 */
+		if (dev_data->xfer.cmd_length > 0) {
+			joblist[job_idx++] = EVDMA_JOB(&packet->cmd, 4, EVDMA_ATTRIBUTE);
+		}
+		if (dev_data->xfer.addr_length > 0) {
+			joblist[job_idx++] = EVDMA_JOB(&packet->address, 4, EVDMA_ATTRIBUTE);
+		}
+		if (packet->num_bytes > 0) {
+			joblist[job_idx++] = EVDMA_JOB(packet->data_buf, packet->num_bytes,
+						       EVDMA_ATTRIBUTE);
+		}
+		/* Always terminate with a null job. */
+		joblist[job_idx] = EVDMA_NULL_JOB();
+		/* tx_job should point to the first valid job, or to the null job if none. */
+		if (job_idx > 0) {
+			transfer_list->tx_job = &joblist[0];
+		} else {
+			transfer_list->tx_job = &joblist[job_idx];
+		}
+
+		/* rx_job is always EVDMA_NULL_JOB() for a transmit. */
+		transfer_list->rx_job = &joblist[job_idx];
+		tmod = 0;
+	} else {
+		preg->CONFIG.RXTRANSFERLENGTH = ((packet->num_bytes + dev_data->xfer.addr_length +
+						  dev_data->xfer.cmd_length) >>
+						 dev_data->bytes_per_frame_exp) - 1;
+		joblist[0] = EVDMA_JOB(packet->data_buf, packet->num_bytes, EVDMA_ATTRIBUTE);
+		joblist[1] = EVDMA_NULL_JOB();
+		transfer_list->tx_job = &joblist[1];
+		transfer_list->rx_job = &joblist[0];
+
+		tmod = QSPI_TMOD_RX_ONLY;
+	}
+
+	/*
+	 * In peripheral (slave) mode, a TMOD register in the wrapper also needs to
+	 * be set. Its address is not in the MDK yet, so it is hard-coded here as a
+	 * temporary fix.
+	 */
+	uintptr_t tmod_addr = (uintptr_t)preg + QSPI_TMOD_OFFSET;
+
+	sys_write32(tmod, tmod_addr);
+
+	preg->CONFIG.TXBURSTLENGTH = (config->tx_fifo_depth_minus_1 + 1) -
+				     config->dma_tx_data_level;
+	preg->CONFIG.RXBURSTLENGTH = config->dma_rx_data_level + 1;
+	preg->DMA.CONFIG.LISTPTR = (uint32_t)transfer_list;
+
+	preg->INTENSET = BIT(QSPI_INTENSET_CORE_Pos)
+		       | BIT(QSPI_INTENSET_DMADONE_Pos);
+
+	preg->TASKS_START = 1;
+}
+
+static inline bool vendor_specific_dma_accessible_check(const struct device *dev,
+							const uint8_t *data_buf)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	return nrf_dma_accessible_check(preg, data_buf);
+}
+
+static inline bool vendor_specific_read_dma_irq(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	return (bool)preg->EVENTS_DMA.DONE;
+}
+
+#else /* Supply empty vendor-specific implementations for the generic case. */
+
+/* Empty macros for the generic case - no vendor-specific data. */
+#define VENDOR_SPECIFIC_DATA_DEFINE(inst)
+#define VENDOR_SPECIFIC_DATA_GET(inst) NULL
+
 static inline void vendor_specific_init(const struct device *dev)
 {
 	ARG_UNUSED(dev);
@@ -134,4 +348,26 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
 	return 0;
 }
 
-#endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
+#if defined(CONFIG_MSPI_DMA)
+static inline void vendor_specific_start_dma_xfer(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+}
+
+static inline bool vendor_specific_dma_accessible_check(const struct device *dev,
+							const uint8_t *data_buf)
+{
+	ARG_UNUSED(dev);
+	ARG_UNUSED(data_buf);
+
+	return true;
+}
+
+static inline bool vendor_specific_read_dma_irq(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	return true;
+}
+#endif /* defined(CONFIG_MSPI_DMA) */
+#endif /* vendor-specific branches */
diff --git a/drivers/pinctrl/pinctrl_nrf.c b/drivers/pinctrl/pinctrl_nrf.c
index ac9b17d27b394..38c7cd017c9e5 100644
--- a/drivers/pinctrl/pinctrl_nrf.c
+++ b/drivers/pinctrl/pinctrl_nrf.c
@@ -539,6 +539,19 @@ int pinctrl_configure_pins(const pinctrl_soc_pin_t *pins, uint8_t pin_cnt,
 			input = NRF_GPIO_PIN_INPUT_DISCONNECT;
 			break;
 #endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
+#if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_qspi_v2)
+		/* No PSEL for QSPI_V2, pins are controlled only by CTRLSEL. */
+		case NRF_FUN_QSPI_SCK:
+		case NRF_FUN_QSPI_CSN:
+		case NRF_FUN_QSPI_IO0:
+		case NRF_FUN_QSPI_IO1:
+		case NRF_FUN_QSPI_IO2:
+		case NRF_FUN_QSPI_IO3:
+			nrf_gpio_pin_control_select(psel, NRF_GPIO_PIN_SEL_QSPI);
+			dir = NRF_GPIO_PIN_DIR_OUTPUT;
+			input = NRF_GPIO_PIN_INPUT_CONNECT;
+			break;
+#endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_qspi_v2) */
 #if defined(NRF_PSEL_TWIS)
 		case NRF_FUN_TWIS_SCL:
 			NRF_PSEL_TWIS(reg, SCL) = psel;
diff --git a/dts/bindings/mspi/mspi-controller.yaml b/dts/bindings/mspi/mspi-controller.yaml
index 0f574c12153e3..7580602da3530 100644
--- a/dts/bindings/mspi/mspi-controller.yaml
+++ b/dts/bindings/mspi/mspi-controller.yaml
@@ -22,8 +22,8 @@ properties:
   op-mode:
     type: string
     enum:
-      - "MSPI_CONTROLLER"
-      - "MSPI_PERIPHERAL"
+      - "MSPI_OP_MODE_CONTROLLER"
+      - "MSPI_OP_MODE_PERIPHERAL"
     description: |
       Indicate MSPI controller or peripheral mode of the controller.
      The controller driver may use this during initialization.
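A worked example may help tie the `TXBURSTLENGTH`/`RXBURSTLENGTH` setup in `vendor_specific_start_dma_xfer()` above to the `dma-transmit-data-level` and `dma-receive-data-level` bindings added below. This is a standalone sketch with made-up values, not code from the patch:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed instance: 16-entry FIFOs with the devicetree properties
	 * dma-transmit-data-level = <8> and dma-receive-data-level = <7>.
	 */
	unsigned int tx_fifo_depth = 16;
	unsigned int dma_tx_data_level = 8; /* TX DMA request when entries <= 8 */
	unsigned int dma_rx_data_level = 7; /* RX DMA request when entries > 7 */

	/* Same arithmetic as the driver: a TX burst refills the FIFO from the
	 * request level back to full, and an RX burst drains one entry more
	 * than the request level.
	 */
	unsigned int tx_burst = tx_fifo_depth - dma_tx_data_level; /* 16 - 8 = 8 */
	unsigned int rx_burst = dma_rx_data_level + 1;             /*  7 + 1 = 8 */

	printf("TX burst: %u entries, RX burst: %u entries\n", tx_burst, rx_burst);
	return 0;
}
```

In the driver, the FIFO depth arrives as `tx_fifo_depth_minus_1 + 1`, which is where the `(config->tx_fifo_depth_minus_1 + 1) - config->dma_tx_data_level` expression comes from.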
diff --git a/dts/bindings/mspi/nordic,nrf-exmif.yaml b/dts/bindings/mspi/nordic,nrf-exmif.yaml
index 294254aa60efb..4e46e5d6b142d 100644
--- a/dts/bindings/mspi/nordic,nrf-exmif.yaml
+++ b/dts/bindings/mspi/nordic,nrf-exmif.yaml
@@ -6,3 +6,7 @@ description: Nordic External Memory Interface (EXMIF)
 compatible: "nordic,nrf-exmif"
 
 include: snps,designware-ssi.yaml
+
+properties:
+  op-mode:
+    default: "MSPI_OP_MODE_CONTROLLER"
diff --git a/dts/bindings/mspi/nordic,nrf-qspi-v2.yaml b/dts/bindings/mspi/nordic,nrf-qspi-v2.yaml
new file mode 100644
index 0000000000000..5ad8a6fd2452d
--- /dev/null
+++ b/dts/bindings/mspi/nordic,nrf-qspi-v2.yaml
@@ -0,0 +1,8 @@
+# Copyright (c) 2025 Nordic Semiconductor ASA
+# SPDX-License-Identifier: Apache-2.0
+
+description: Nordic QSPI v2 interface using the Synopsys DesignWare SSI IP
+
+compatible: "nordic,nrf-qspi-v2"
+
+include: snps,designware-ssi.yaml
diff --git a/dts/bindings/mspi/snps,designware-ssi.yaml b/dts/bindings/mspi/snps,designware-ssi.yaml
index fb516cb783588..0a52653eb4ae7 100644
--- a/dts/bindings/mspi/snps,designware-ssi.yaml
+++ b/dts/bindings/mspi/snps,designware-ssi.yaml
@@ -43,3 +43,20 @@ properties:
     description: |
       Number of entries in the RX FIFO above which the controller gets
      an RX interrupt. Maximum value is the RX FIFO depth - 1.
+
+  dma-transmit-data-level:
+    type: int
+    description: |
+      When in DMA mode, the transmit data level field controls the level at which a DMA
+      request is made by the transmit logic. A request to transmit is generated when the
+      number of valid data entries in the transmit FIFO is equal to or below this value.
+      Lower values mean less frequent DMA requests with larger bursts; higher values mean
+      more frequent, smaller bursts (lower latency, higher overhead). Range: 0-15.
+
+  dma-receive-data-level:
+    type: int
+    description: |
+      When in DMA mode, the receive data level field controls the level at which a DMA
+      request is made by the receive logic. A request to receive is generated when the
+      number of valid data entries in the receive FIFO is greater than this value. Lower
+      values mean more frequent DMA requests; higher values mean larger, less frequent
+      bursts. Range: 0-15.
diff --git a/include/zephyr/drivers/mspi.h b/include/zephyr/drivers/mspi.h
index c486f48a8ddfc..b30627efad78d 100644
--- a/include/zephyr/drivers/mspi.h
+++ b/include/zephyr/drivers/mspi.h
@@ -126,6 +126,8 @@ enum mspi_bus_event {
 	MSPI_BUS_RESET = 0,
 	MSPI_BUS_ERROR = 1,
 	MSPI_BUS_XFER_COMPLETE = 2,
+	/** @brief A request or transfer has timed out. */
+	MSPI_BUS_TIMEOUT = 3,
 	MSPI_BUS_EVENT_MAX,
 };
 
@@ -139,6 +141,7 @@ enum mspi_bus_event_cb_mask {
 	MSPI_BUS_RESET_CB = BIT(0),
 	MSPI_BUS_ERROR_CB = BIT(1),
 	MSPI_BUS_XFER_COMPLETE_CB = BIT(2),
+	MSPI_BUS_TIMEOUT_CB = BIT(3),
 };
 
 /**
diff --git a/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h b/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h
index 92e62a9a6bed3..64f4d3fbce5ce 100644
--- a/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h
+++ b/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h
@@ -198,6 +198,18 @@
 #define NRF_FUN_TPIU_DATA2 82U
 /** TPIU DATA3 */
 #define NRF_FUN_TPIU_DATA3 83U
+/** MSPI version 2 chip select */
+#define NRF_FUN_MSPI_CSN 84U
+/** MSPI version 2 clock pin */
+#define NRF_FUN_MSPI_SCK 85U
+/** MSPI version 2 data pin 0 */
+#define NRF_FUN_MSPI_DQ0 86U
+/** MSPI version 2 data pin 1 */
+#define NRF_FUN_MSPI_DQ1 87U
+/** MSPI version 2 data pin 2 */
+#define NRF_FUN_MSPI_DQ2 88U
+/** MSPI version 2 data pin 3 */
+#define NRF_FUN_MSPI_DQ3 89U
 
 /** @} */
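End to end, the pieces above enable the following application flow: an asynchronous, DMA-driven write whose completion and timeout are delivered through the callbacks registered earlier. This is an illustrative sketch only; the opcode, command/address lengths and buffer are made-up values, `MSPI_DMA` requires `CONFIG_MSPI_DMA` plus driver support, and each packet's `cb_mask` selects which of the registered callbacks may fire for it:

```c
#include <zephyr/drivers/mspi.h>

static uint8_t tx_buf[256];

/* One TX packet; cb_mask opts this packet into the completion and timeout
 * callbacks previously registered via mspi_register_callback().
 */
static const struct mspi_xfer_packet packet = {
	.dir       = MSPI_TX,
	.cb_mask   = MSPI_BUS_XFER_COMPLETE_CB | MSPI_BUS_TIMEOUT_CB,
	.cmd       = 0x02,            /* made-up opcode */
	.address   = 0x000000,
	.num_bytes = sizeof(tx_buf),
	.data_buf  = tx_buf,
};

static const struct mspi_xfer xfer = {
	.async       = true,          /* return once the first packet is started */
	.xfer_mode   = MSPI_DMA,      /* or MSPI_PIO where DMA is unavailable */
	.cmd_length  = 1,
	.addr_length = 3,
	.packets     = &packet,
	.num_packet  = 1,
	.timeout     = 100,           /* ms; arms the MSPI_BUS_TIMEOUT timer */
};

static int start_async_write(const struct device *controller,
			     const struct mspi_dev_id *dev_id)
{
	/* Completion, error and timeout are reported through the registered
	 * callbacks; a negative return means the first packet failed to start.
	 */
	return mspi_transceive(controller, dev_id, &xfer);
}
```

With `.async = true`, `mspi_transceive()` returns right after the first packet is started; subsequent packets are chained from the driver's work handler, and `timeout` arms the per-packet timer that ultimately raises `MSPI_BUS_TIMEOUT`.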