From 889f070f3a7f732bd352b56085e157e424d879cb Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Wed, 15 Oct 2025 18:15:25 +0100
Subject: [PATCH 1/9] [nrf fromtree] dts: nordic: Add nrf-qspi-v2 binding

The nrf-qspi-v2 peripheral is similar to EXMIF on nrf54h20 but supports
DMA and slave mode. The wrapper around the SSI IP also differs, adding
DMA features.

Signed-off-by: David Jewsbury
(cherry picked from commit 72dd539cbacd344fc6fdb79d82b9c8378dbe2aca)
---
 dts/bindings/mspi/nordic,nrf-qspi-v2.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)
 create mode 100644 dts/bindings/mspi/nordic,nrf-qspi-v2.yaml

diff --git a/dts/bindings/mspi/nordic,nrf-qspi-v2.yaml b/dts/bindings/mspi/nordic,nrf-qspi-v2.yaml
new file mode 100644
index 00000000000..5ad8a6fd245
--- /dev/null
+++ b/dts/bindings/mspi/nordic,nrf-qspi-v2.yaml
@@ -0,0 +1,8 @@
+# Copyright (c) 2025 Nordic Semiconductor ASA
+# SPDX-License-Identifier: Apache-2.0
+
+description: Nordic QSPI v2 Interface using SSI IP
+
+compatible: "nordic,nrf-qspi-v2"
+
+include: snps,designware-ssi.yaml

From 6eaab123f0303d465fc215967f91a70bb857118d Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Wed, 13 Aug 2025 16:48:39 +0100
Subject: [PATCH 2/9] [nrf fromtree] drivers: pinctrl: nrf: add support for MSPI

Support for the new MSPI peripheral, where there is no PSEL, so pins
are set up through CTRLSEL.

Signed-off-by: David Jewsbury
(cherry picked from commit ac94ca7894df365aa8bbfa4c640932bd8ba26cf2)
---
 drivers/pinctrl/pinctrl_nrf.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/pinctrl/pinctrl_nrf.c b/drivers/pinctrl/pinctrl_nrf.c
index 465fe9961f7..3ad358158a8 100644
--- a/drivers/pinctrl/pinctrl_nrf.c
+++ b/drivers/pinctrl/pinctrl_nrf.c
@@ -499,6 +499,19 @@ int pinctrl_configure_pins(const pinctrl_soc_pin_t *pins, uint8_t pin_cnt,
 		input = NRF_GPIO_PIN_INPUT_DISCONNECT;
 		break;
 #endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
+#if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_qspi_v2)
+	/* No PSEL for QSPI_V2, pins only controlled by CTRLSEL */
+	case NRF_FUN_QSPI_SCK:
+	case NRF_FUN_QSPI_CSN:
+	case NRF_FUN_QSPI_IO0:
+	case NRF_FUN_QSPI_IO1:
+	case NRF_FUN_QSPI_IO2:
+	case NRF_FUN_QSPI_IO3:
+		nrf_gpio_pin_control_select(psel, NRF_GPIO_PIN_SEL_QSPI);
+		dir = NRF_GPIO_PIN_DIR_OUTPUT;
+		input = NRF_GPIO_PIN_INPUT_CONNECT;
+		break;
+#endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_qspi_v2) */
 #if defined(NRF_PSEL_TWIS)
 	case NRF_FUN_TWIS_SCL:
 		NRF_PSEL_TWIS(reg, SCL) = psel;

From e7f7c2ab84a603b8d0be2ff55e0ce040493a2dbc Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Thu, 4 Sep 2025 11:36:45 +0100
Subject: [PATCH 3/9] [nrf fromtree] drivers: mspi: Add timeout callback to MSPI API

This is a new callback option that drivers can use when a request or
xfer has timed out. E.g. if an async RX request never receives
anything, this callback can be used so a user can clean up their
application.

Signed-off-by: David Jewsbury
(cherry picked from commit a322957f59e5a7bd33a129e6203ad6446d6e6813)
---
 doc/hardware/peripherals/mspi.rst | 10 ++++++----
 include/zephyr/drivers/mspi.h     |  3 +++
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/doc/hardware/peripherals/mspi.rst b/doc/hardware/peripherals/mspi.rst
index 176c9bf5ede..1f01857f32c 100644
--- a/doc/hardware/peripherals/mspi.rst
+++ b/doc/hardware/peripherals/mspi.rst
@@ -100,10 +100,12 @@ whether to support scatter IO and callback management.
 The controller can determine which user callback to trigger based on
 :c:enum:`mspi_bus_event_cb_mask` upon completion of each async/sync transfer if the callback
 had been registered using :c:func:`mspi_register_callback`. Or not to trigger any callback at all with
-:c:enum:`MSPI_BUS_NO_CB` even if the callbacks are already registered.
-In which case that a controller supports hardware command queue, user could take full
-advantage of the hardware performance if scatter IO and callback management are supported
-by the driver implementation.
+:c:enum:`MSPI_BUS_NO_CB` even if the callbacks are already registered. If the implemented
+driver supports it, the API supports :c:enum:`MSPI_BUS_XFER_COMPLETE_CB` to signal when a
+transfer finishes and :c:enum:`MSPI_BUS_TIMEOUT_CB` to signal when a transfer or request
+has timed out. Where a controller supports a hardware command queue, the user could
+take full advantage of the hardware performance if scatter IO and callback management are
+supported by the driver implementation.
 
 Device Tree
 ===========

diff --git a/include/zephyr/drivers/mspi.h b/include/zephyr/drivers/mspi.h
index c486f48a8dd..b30627efad7 100644
--- a/include/zephyr/drivers/mspi.h
+++ b/include/zephyr/drivers/mspi.h
@@ -126,6 +126,8 @@ enum mspi_bus_event {
 	MSPI_BUS_RESET          = 0,
 	MSPI_BUS_ERROR          = 1,
 	MSPI_BUS_XFER_COMPLETE  = 2,
+	/** @brief When a request or xfer has timed out */
+	MSPI_BUS_TIMEOUT        = 3,
 	MSPI_BUS_EVENT_MAX,
 };
@@ -139,6 +141,7 @@ enum mspi_bus_event_cb_mask {
 	MSPI_BUS_RESET_CB         = BIT(0),
 	MSPI_BUS_ERROR_CB         = BIT(1),
 	MSPI_BUS_XFER_COMPLETE_CB = BIT(2),
+	MSPI_BUS_TIMEOUT_CB       = BIT(3),
 };
 
 /**

From ae2daeb3622ddd9bb6101b6b7dd0f260a4346ac6 Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Tue, 26 Aug 2025 17:15:11 +0100
Subject: [PATCH 4/9] [nrf fromtree] dts: mspi: Align op-mode binding with mspi.h enum

enum mspi_op_mode in mspi.h has different syntax to this binding.
Aligning these will allow for cleaner code in the implemented drivers.

Signed-off-by: David Jewsbury
(cherry picked from commit 032ca4c894d9239ae7c2753ea4edc75c8e84ea3d)
---
 dts/bindings/mspi/mspi-controller.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dts/bindings/mspi/mspi-controller.yaml b/dts/bindings/mspi/mspi-controller.yaml
index 6f22d0dc0bf..dd7c99e34ec 100644
--- a/dts/bindings/mspi/mspi-controller.yaml
+++ b/dts/bindings/mspi/mspi-controller.yaml
@@ -22,8 +22,8 @@ properties:
   op-mode:
     type: string
     enum:
-      - "MSPI_CONTROLLER"
-      - "MSPI_PERIPHERAL"
+      - "MSPI_OP_MODE_CONTROLLER"
+      - "MSPI_OP_MODE_PERIPHERAL"
     description: |
       Indicate MSPI controller or peripheral mode of the controller.
       The controller driver may use this during initialization.

From e153daf5531727fcb1dabce37ee7aeef1d8f112a Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Thu, 9 Oct 2025 16:45:00 +0200
Subject: [PATCH 5/9] [nrf fromtree] drivers: mspi_dw: Add support for asynchronous transfers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Handling of asynchronous transfers uses the system workqueue, hence
they are not available when multithreading is disabled.

Also add missing dependency on multithreading in the
MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE Kconfig option.
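For reference, asynchronous usage with the new timeout event looks
roughly like the sketch below (illustrative only: the `controller` and
`dev_id` handles, the buffer size and the 100 ms timeout are
assumptions, not part of this patch):

  static uint8_t rx_buf[16];
  static struct mspi_callback_context timeout_ctx;

  /* Matches the variadic mspi_callback_handler_t signature. */
  static void on_timeout(struct mspi_callback_context *ctx, ...)
  {
  	/* Clean up application state for the timed-out request. */
  	printk("packet %u timed out (status %d)\n",
  	       ctx->mspi_evt.evt_data.packet_idx,
  	       ctx->mspi_evt.evt_data.status);
  }

  const struct mspi_xfer_packet packet = {
  	.dir       = MSPI_RX,
  	.cb_mask   = MSPI_BUS_TIMEOUT_CB,
  	.data_buf  = rx_buf,
  	.num_bytes = sizeof(rx_buf),
  };
  const struct mspi_xfer xfer = {
  	.async      = true,
  	.timeout    = 100, /* ms */
  	.num_packet = 1,
  	.packets    = &packet,
  };

  mspi_register_callback(controller, &dev_id, MSPI_BUS_TIMEOUT,
  			 on_timeout, &timeout_ctx);
  mspi_transceive(controller, &dev_id, &xfer);

If nothing arrives before the timeout, the driver stops the transfer
and invokes on_timeout() from the system workqueue.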
Signed-off-by: David Jewsbury Signed-off-by: Andrzej Głąbek (cherry picked from commit 2f1ee737b362ea697834ddf8f8a11ccb50e4659d) --- drivers/mspi/Kconfig.dw | 1 + drivers/mspi/mspi_dw.c | 238 ++++++++++++++++++++++++++++++++++++---- 2 files changed, 219 insertions(+), 20 deletions(-) diff --git a/drivers/mspi/Kconfig.dw b/drivers/mspi/Kconfig.dw index cff148c138c..485ea6c1a42 100644 --- a/drivers/mspi/Kconfig.dw +++ b/drivers/mspi/Kconfig.dw @@ -14,6 +14,7 @@ if MSPI_DW config MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE bool "Handle FIFO in system workqueue" + depends on MULTITHREADING help When the driver does not use DMA for transferring data to/from the SSI FIFOs, handling of those may take a significant amount of time. diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c index 518b5605785..e8c9a817095 100644 --- a/drivers/mspi/mspi_dw.c +++ b/drivers/mspi/mspi_dw.c @@ -66,20 +66,29 @@ struct mspi_dw_data { bool suspended; #if defined(CONFIG_MULTITHREADING) + const struct device *dev; + struct k_sem finished; /* For synchronization of API calls made from different contexts. */ struct k_sem ctx_lock; /* For locking of controller configuration. */ struct k_sem cfg_lock; + + struct k_timer async_timer; + struct k_work async_timeout_work; + struct k_work async_packet_work; + + mspi_callback_handler_t cbs[MSPI_BUS_EVENT_MAX]; + struct mspi_callback_context *cb_ctxs[MSPI_BUS_EVENT_MAX]; #else volatile bool finished; bool cfg_lock; #endif + struct mspi_xfer xfer; #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE) struct k_work fifo_work; - const struct device *dev; uint32_t imr; #endif }; @@ -141,6 +150,100 @@ DEFINE_MM_REG_WR(xip_write_ctrl, 0x148) #include "mspi_dw_vendor_specific.h" +static int start_next_packet(const struct device *dev); +static int finalize_packet(const struct device *dev, int rc); +static int finalize_transceive(const struct device *dev, int rc); + +#if defined(CONFIG_MULTITHREADING) +/* Common function to setup callback context and call user callback */ +static void call_user_callback_with_context(const struct device *dev, + enum mspi_bus_event evt_type, + uint32_t packet_idx, + int status) +{ + struct mspi_dw_data *dev_data = dev->data; + const struct mspi_xfer_packet *packet = + &dev_data->xfer.packets[packet_idx]; + struct mspi_callback_context *cb_ctx = dev_data->cb_ctxs[evt_type]; + + if (!(packet->cb_mask & BIT(evt_type)) || + !dev_data->cbs[evt_type]) { + return; + } + + LOG_DBG("Calling user function with evt_type: %u", evt_type); + + cb_ctx->mspi_evt.evt_type = evt_type; + cb_ctx->mspi_evt.evt_data.controller = dev; + cb_ctx->mspi_evt.evt_data.dev_id = dev_data->dev_id; + cb_ctx->mspi_evt.evt_data.packet = packet; + cb_ctx->mspi_evt.evt_data.packet_idx = packet_idx; + cb_ctx->mspi_evt.evt_data.status = status; + + dev_data->cbs[evt_type](cb_ctx); +} + +static void async_timeout_timer_handler(struct k_timer *timer) +{ + struct mspi_dw_data *dev_data = + CONTAINER_OF(timer, struct mspi_dw_data, async_timer); + + /* Submit work to handle timeout in proper context */ + k_work_submit(&dev_data->async_timeout_work); +} + +static void async_timeout_work_handler(struct k_work *work) +{ + struct mspi_dw_data *dev_data = + CONTAINER_OF(work, struct mspi_dw_data, async_timeout_work); + const struct device *dev = dev_data->dev; + int rc; + + LOG_ERR("Async transfer timed out"); + + rc = finalize_packet(dev, -ETIMEDOUT); + rc = finalize_transceive(dev, rc); + + /* Call user callback with timeout error (outside of any locks) */ + 
call_user_callback_with_context(dev, MSPI_BUS_TIMEOUT, + dev_data->packets_done, rc); +} + +static void async_packet_work_handler(struct k_work *work) +{ + struct mspi_dw_data *dev_data = + CONTAINER_OF(work, struct mspi_dw_data, async_packet_work); + const struct device *dev = dev_data->dev; + uint32_t packet_idx = dev_data->packets_done; + int rc; + + LOG_DBG("Processing async work in thread context"); + + rc = finalize_packet(dev, 0); + if (rc >= 0) { + ++dev_data->packets_done; + if (dev_data->packets_done < dev_data->xfer.num_packet) { + LOG_DBG("Starting next packet (%d/%d)", + dev_data->packets_done + 1, + dev_data->xfer.num_packet); + + rc = start_next_packet(dev); + if (rc >= 0) { + return; + } + + ++packet_idx; + } + } + + rc = finalize_transceive(dev, rc); + call_user_callback_with_context(dev, + rc < 0 ? MSPI_BUS_ERROR + : MSPI_BUS_XFER_COMPLETE, + packet_idx, rc); +} +#endif /* defined(CONFIG_MULTITHREADING) */ + static void tx_data(const struct device *dev, const struct mspi_xfer_packet *packet) { @@ -316,6 +419,22 @@ static inline void set_imr(const struct device *dev, uint32_t imr) #endif } +static void handle_end_of_packet(struct mspi_dw_data *dev_data) + +{ +#if defined(CONFIG_MULTITHREADING) + if (dev_data->xfer.async) { + k_timer_stop(&dev_data->async_timer); + + k_work_submit(&dev_data->async_packet_work); + } else { + k_sem_give(&dev_data->finished); + } +#else + dev_data->finished = true; +#endif +} + static void handle_fifos(const struct device *dev) { struct mspi_dw_data *dev_data = dev->data; @@ -401,11 +520,8 @@ static void handle_fifos(const struct device *dev) if (finished) { set_imr(dev, 0); -#if defined(CONFIG_MULTITHREADING) - k_sem_give(&dev_data->finished); -#else - dev_data->finished = true; -#endif + handle_end_of_packet(dev_data); + } } @@ -874,6 +990,10 @@ static int api_dev_config(const struct device *dev, } dev_data->dev_id = dev_id; + +#if defined(CONFIG_MULTITHREADING) + memset(dev_data->cbs, 0, sizeof(dev_data->cbs)); +#endif } if (param_mask == MSPI_DEVICE_CONFIG_NONE && @@ -935,7 +1055,7 @@ static void tx_control_field(const struct device *dev, } while (shift); } -static int start_next_packet(const struct device *dev, k_timeout_t timeout) +static int start_next_packet(const struct device *dev) { const struct mspi_dw_config *dev_config = dev->config; struct mspi_dw_data *dev_data = dev->data; @@ -1177,13 +1297,26 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout) tx_data(dev, packet); } - /* Enable interrupts now and wait until the packet is done. */ + /* Enable interrupts now */ write_imr(dev, imr); /* Write SER to start transfer */ write_ser(dev, BIT(dev_data->dev_id->dev_idx)); #if defined(CONFIG_MULTITHREADING) + k_timeout_t timeout = K_MSEC(dev_data->xfer.timeout); + + /* For async transfer, start the timeout timer and exit. */ + if (dev_data->xfer.async) { + k_timer_start(&dev_data->async_timer, timeout, K_NO_WAIT); + + return 0; + } + + /* For sync transfer, wait until the packet is finished. 
*/ rc = k_sem_take(&dev_data->finished, timeout); + if (rc < 0) { + rc = -ETIMEDOUT; + } #else if (!WAIT_FOR(dev_data->finished, dev_data->xfer.timeout * USEC_PER_MSEC, @@ -1193,12 +1326,22 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout) dev_data->finished = false; #endif + + return finalize_packet(dev, rc); +} + +static int finalize_packet(const struct device *dev, int rc) +{ + struct mspi_dw_data *dev_data = dev->data; + bool xip_enabled = COND_CODE_1(CONFIG_MSPI_XIP, + (dev_data->xip_enabled != 0), + (false)); + if (read_risr(dev) & RISR_RXOIR_BIT) { LOG_ERR("RX FIFO overflow occurred"); rc = -EIO; - } else if (rc < 0) { + } else if (rc == -ETIMEDOUT) { LOG_ERR("Transfer timed out"); - rc = -ETIMEDOUT; } /* Disable the controller. This will immediately halt the transfer @@ -1207,10 +1350,10 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout) if (xip_enabled) { /* If XIP is enabled, the controller must be kept enabled, * so disable it only momentarily if there's a need to halt - * a transfer that has timeout out. + * a transfer that ended up with an error. */ - if (rc == -ETIMEDOUT) { - key = irq_lock(); + if (rc < 0) { + unsigned int key = irq_lock(); write_ssienr(dev, 0); write_ssienr(dev, SSIENR_SSIC_EN_BIT); @@ -1220,13 +1363,14 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout) } else { write_ssienr(dev, 0); } + /* Clear SER */ write_ser(dev, 0); if (dev_data->dev_id->ce.port) { int rc2; - /* Do not use `rc` to not overwrite potential timeout error. */ + /* Do not use `rc` to not overwrite potential packet error. */ rc2 = gpio_pin_set_dt(&dev_data->dev_id->ce, 0); if (rc2 < 0) { LOG_ERR("Failed to deactivate CE line (%d)", rc2); @@ -1269,10 +1413,18 @@ static int _api_transceive(const struct device *dev, dev_data->xfer = *req; + /* For async, only the first packet is started here, next ones, if any, + * are started by ISR. 
+ */ + if (req->async) { + dev_data->packets_done = 0; + return start_next_packet(dev); + } + for (dev_data->packets_done = 0; dev_data->packets_done < dev_data->xfer.num_packet; dev_data->packets_done++) { - rc = start_next_packet(dev, K_MSEC(dev_data->xfer.timeout)); + rc = start_next_packet(dev); if (rc < 0) { return rc; } @@ -1286,16 +1438,15 @@ static int api_transceive(const struct device *dev, const struct mspi_xfer *req) { struct mspi_dw_data *dev_data = dev->data; - int rc, rc2; + int rc; if (dev_id != dev_data->dev_id) { LOG_ERR("Controller is not configured for this device"); return -EINVAL; } - /* TODO: add support for asynchronous transfers */ - if (req->async) { - LOG_ERR("Asynchronous transfers are not supported"); + if (req->async && !IS_ENABLED(CONFIG_MULTITHREADING)) { + LOG_ERR("Asynchronous transfers require multithreading"); return -ENOTSUP; } @@ -1315,7 +1466,20 @@ static int api_transceive(const struct device *dev, rc = _api_transceive(dev, req); } + if (req->async && rc >= 0) { + return rc; + } + + return finalize_transceive(dev, rc); +} + +static int finalize_transceive(const struct device *dev, int rc) +{ + int rc2; + #if defined(CONFIG_MULTITHREADING) + struct mspi_dw_data *dev_data = dev->data; + k_sem_give(&dev_data->ctx_lock); #endif @@ -1328,6 +1492,33 @@ static int api_transceive(const struct device *dev, return rc; } +#if defined(CONFIG_MULTITHREADING) +static int api_register_callback(const struct device *dev, + const struct mspi_dev_id *dev_id, + const enum mspi_bus_event evt_type, + mspi_callback_handler_t cb, + struct mspi_callback_context *ctx) +{ + struct mspi_dw_data *dev_data = dev->data; + + if (dev_id != dev_data->dev_id) { + LOG_ERR("Controller is not configured for this device"); + return -EINVAL; + } + + if (evt_type != MSPI_BUS_ERROR && + evt_type != MSPI_BUS_XFER_COMPLETE && + evt_type != MSPI_BUS_TIMEOUT) { + LOG_ERR("Callback type %d not supported", evt_type); + return -ENOTSUP; + } + + dev_data->cbs[evt_type] = cb; + dev_data->cb_ctxs[evt_type] = ctx; + return 0; +} +#endif /* defined(CONFIG_MULTITHREADING) */ + #if defined(CONFIG_MSPI_TIMING) static int api_timing_config(const struct device *dev, const struct mspi_dev_id *dev_id, @@ -1570,13 +1761,17 @@ static int dev_init(const struct device *dev) #if defined(CONFIG_MULTITHREADING) struct mspi_dw_data *dev_data = dev->data; + dev_data->dev = dev; k_sem_init(&dev_data->finished, 0, 1); k_sem_init(&dev_data->cfg_lock, 1, 1); k_sem_init(&dev_data->ctx_lock, 1, 1); + + k_timer_init(&dev_data->async_timer, async_timeout_timer_handler, NULL); + k_work_init(&dev_data->async_timeout_work, async_timeout_work_handler); + k_work_init(&dev_data->async_packet_work, async_packet_work_handler); #endif #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE) - dev_data->dev = dev; k_work_init(&dev_data->fifo_work, fifo_work_handler); #endif @@ -1616,6 +1811,9 @@ static DEVICE_API(mspi, drv_api) = { .dev_config = api_dev_config, .get_channel_status = api_get_channel_status, .transceive = api_transceive, +#if defined(CONFIG_MULTITHREADING) + .register_callback = api_register_callback, +#endif #if defined(CONFIG_MSPI_TIMING) .timing_config = api_timing_config, #endif From cb89b334df70d52d4a860b00de23afbf63b1e8c4 Mon Sep 17 00:00:00 2001 From: David Jewsbury Date: Mon, 13 Oct 2025 11:50:55 +0100 Subject: [PATCH 6/9] [nrf fromtree] drivers: mspi_dw: Add support for slave mode MSPI slave mode is selected through devicetree using the op-mode property. Mode selected by SSIISMST bit in the CTRLR0 register. 
EXMIF can only be a master (controller).

Signed-off-by: David Jewsbury
(cherry picked from commit a18fd950045dc6e1fe415710fd4392ead30e46da)
---
 drivers/mspi/mspi_dw.c                  | 8 ++++++--
 drivers/mspi/mspi_dw.h                  | 1 +
 dts/bindings/mspi/nordic,nrf-exmif.yaml | 4 ++++
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c
index e8c9a817095..cefcb5e5956 100644
--- a/drivers/mspi/mspi_dw.c
+++ b/drivers/mspi/mspi_dw.c
@@ -114,6 +114,7 @@ struct mspi_dw_config {
 	uint8_t rx_fifo_threshold;
 	DECLARE_REG_ACCESS();
 	bool sw_multi_periph;
+	enum mspi_op_mode op_mode;
 };
 
 /* Register access helpers. */
@@ -1748,6 +1749,7 @@ static int dev_pm_action_cb(const struct device *dev,
 
 static int dev_init(const struct device *dev)
 {
+	struct mspi_dw_data *dev_data = dev->data;
 	const struct mspi_dw_config *dev_config = dev->config;
 	const struct gpio_dt_spec *ce_gpio;
 	int rc;
@@ -1756,11 +1758,12 @@ static int dev_init(const struct device *dev)
 
 	vendor_specific_init(dev);
 
+	dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SSI_IS_MST_BIT,
+				       dev_config->op_mode == MSPI_OP_MODE_CONTROLLER);
+
 	dev_config->irq_config();
 
 #if defined(CONFIG_MULTITHREADING)
-	struct mspi_dw_data *dev_data = dev->data;
-
 	dev_data->dev = dev;
 	k_sem_init(&dev_data->finished, 0, 1);
 	k_sem_init(&dev_data->cfg_lock, 1, 1);
@@ -1885,6 +1888,7 @@ static DEVICE_API(mspi, drv_api) = {
 		DEFINE_REG_ACCESS(inst)					\
 		.sw_multi_periph =					\
 			DT_INST_PROP(inst, software_multiperipheral),	\
+		.op_mode = DT_INST_STRING_TOKEN(inst, op_mode),		\
 	};								\
 	DEVICE_DT_INST_DEFINE(inst,					\
 		dev_init, PM_DEVICE_DT_INST_GET(inst),			\

diff --git a/drivers/mspi/mspi_dw.h b/drivers/mspi/mspi_dw.h
index 28e4bed016e..894bee40d6c 100644
--- a/drivers/mspi/mspi_dw.h
+++ b/drivers/mspi/mspi_dw.h
@@ -19,6 +19,7 @@
  */
 
 /* CTRLR0 - Control Register 0 */
+#define CTRLR0_SSI_IS_MST_BIT	BIT(31)
 #define CTRLR0_SPI_FRF_MASK	COND_CODE_1(SSI_VERSION_2, GENMASK(22, 21), GENMASK(23, 22))
 #define CTRLR0_SPI_FRF_STANDARD	0UL
 #define CTRLR0_SPI_FRF_DUAL	1UL

diff --git a/dts/bindings/mspi/nordic,nrf-exmif.yaml b/dts/bindings/mspi/nordic,nrf-exmif.yaml
index 294254aa60e..4e46e5d6b14 100644
--- a/dts/bindings/mspi/nordic,nrf-exmif.yaml
+++ b/dts/bindings/mspi/nordic,nrf-exmif.yaml
@@ -6,3 +6,7 @@ description: Nordic External Memory Interface (EXMIF)
 compatible: "nordic,nrf-exmif"
 
 include: snps,designware-ssi.yaml
+
+properties:
+  op-mode:
+    default: "MSPI_OP_MODE_CONTROLLER"

From cc463ff206f57e7d9753967297555448ad154232 Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Tue, 14 Oct 2025 11:05:37 +0100
Subject: [PATCH 7/9] [nrf fromtree] drivers: mspi: mspi_dw: Add DMA support

Initial DMA support. DMA support targets implementations of the SSI IP
but uses the vendor-specific DMA in the wrapper. The DMA setup is done
in mspi_dw_vendor_specific.h.
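For reference, an application opts into the DMA path per transfer via
the xfer_mode field of struct mspi_xfer (sketch; the `controller` and
`dev_id` handles and the buffer are assumptions -- the driver rejects
buffers that fail the vendor DMA-accessibility check):

  static uint8_t dma_buf[256]; /* must be in DMA-accessible memory */

  const struct mspi_xfer_packet packet = {
  	.dir       = MSPI_TX,
  	.data_buf  = dma_buf,
  	.num_bytes = sizeof(dma_buf),
  };
  const struct mspi_xfer xfer = {
  	.xfer_mode  = MSPI_DMA,
  	.timeout    = 100, /* ms */
  	.num_packet = 1,
  	.packets    = &packet,
  };

  mspi_transceive(controller, &dev_id, &xfer);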
Signed-off-by: David Jewsbury (cherry picked from commit d9677bbd7b54c22aa6fbaa75c67478f22e42b33f) --- doc/hardware/peripherals/mspi.rst | 1 + drivers/mspi/Kconfig | 6 + drivers/mspi/Kconfig.dw | 1 - drivers/mspi/mspi_dw.c | 227 +++++++++++++------ drivers/mspi/mspi_dw.h | 16 ++ drivers/mspi/mspi_dw_vendor_specific.h | 247 ++++++++++++++++++++- dts/bindings/mspi/snps,designware-ssi.yaml | 17 ++ 7 files changed, 439 insertions(+), 76 deletions(-) diff --git a/doc/hardware/peripherals/mspi.rst b/doc/hardware/peripherals/mspi.rst index 1f01857f32c..b9139f1ed1a 100644 --- a/doc/hardware/peripherals/mspi.rst +++ b/doc/hardware/peripherals/mspi.rst @@ -194,6 +194,7 @@ Related configuration options: * :kconfig:option:`CONFIG_MSPI_TIMING` * :kconfig:option:`CONFIG_MSPI_INIT_PRIORITY` * :kconfig:option:`CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE` +* :kconfig:option:`CONFIG_MSPI_DMA` API Reference ************* diff --git a/drivers/mspi/Kconfig b/drivers/mspi/Kconfig index 269d8d16f04..a6442c1f70f 100644 --- a/drivers/mspi/Kconfig +++ b/drivers/mspi/Kconfig @@ -55,6 +55,12 @@ config MSPI_TIMING Enables mspi_timing_config calls in device drivers for those controllers that need this to proper function at high frequencies. +config MSPI_DMA + bool "DMA support" + help + Enables DMA capabilities, depending on the driver and hardware it + runs on. + module = MSPI module-str = mspi source "subsys/logging/Kconfig.template.log_config" diff --git a/drivers/mspi/Kconfig.dw b/drivers/mspi/Kconfig.dw index 485ea6c1a42..71302a801fd 100644 --- a/drivers/mspi/Kconfig.dw +++ b/drivers/mspi/Kconfig.dw @@ -7,7 +7,6 @@ config MSPI_DW default y depends on DT_HAS_SNPS_DESIGNWARE_SSI_ENABLED select PINCTRL if $(dt_compat_any_has_prop,$(DT_COMPAT_SNPS_DESIGNWARE_SSI),pinctrl-0) - imply MSPI_XIP imply MSPI_TIMING if MSPI_DW diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c index cefcb5e5956..d4700a99261 100644 --- a/drivers/mspi/mspi_dw.c +++ b/drivers/mspi/mspi_dw.c @@ -95,6 +95,7 @@ struct mspi_dw_data { struct mspi_dw_config { DEVICE_MMIO_ROM; + void *wrapper_regs; void (*irq_config)(void); uint32_t clock_frequency; #if defined(CONFIG_PINCTRL) @@ -112,6 +113,11 @@ struct mspi_dw_config { uint8_t max_queued_dummy_bytes; uint8_t tx_fifo_threshold; uint8_t rx_fifo_threshold; +#ifdef CONFIG_MSPI_DMA + uint8_t dma_tx_data_level; + uint8_t dma_rx_data_level; +#endif + void *vendor_specific_data; DECLARE_REG_ACCESS(); bool sw_multi_periph; enum mspi_op_mode op_mode; @@ -139,6 +145,11 @@ DEFINE_MM_REG_RD_WR(dr, 0x60) DEFINE_MM_REG_WR(rx_sample_dly, 0xf0) DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4) DEFINE_MM_REG_WR(txd_drive_edge, 0xf8) +#if defined(CONFIG_MSPI_DMA) +DEFINE_MM_REG_WR(dmacr, 0x4C) +DEFINE_MM_REG_WR(dmatdlr, 0x50) +DEFINE_MM_REG_WR(dmardlr, 0x54) +#endif #if defined(CONFIG_MSPI_XIP) DEFINE_MM_REG_WR(xip_incr_inst, 0x100) @@ -541,6 +552,19 @@ static void fifo_work_handler(struct k_work *work) static void mspi_dw_isr(const struct device *dev) { +#if defined(CONFIG_MSPI_DMA) + struct mspi_dw_data *dev_data = dev->data; + + if (dev_data->xfer.xfer_mode == MSPI_DMA) { + if (vendor_specific_read_dma_irq(dev)) { + set_imr(dev, 0); + handle_end_of_packet(dev_data); + } + vendor_specific_irq_clear(dev); + return; + } +#endif + #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE) struct mspi_dw_data *dev_data = dev->data; int rc; @@ -1067,7 +1091,7 @@ static int start_next_packet(const struct device *dev) (false)); unsigned int key; uint32_t packet_frames; - uint32_t imr; + uint32_t imr = 0; int rc = 0; if 
(packet->num_bytes == 0 && @@ -1115,6 +1139,18 @@ static int start_next_packet(const struct device *dev) return -EINVAL; } +#if defined(CONFIG_MSPI_DMA) + if (dev_data->xfer.xfer_mode == MSPI_DMA) { + /* Check if the packet buffer is accessible */ + if (packet->num_bytes > 0 && + !vendor_specific_dma_accessible_check(dev, packet->data_buf)) { + LOG_ERR("Buffer not DMA accessible: ptr=0x%lx, size=%u", + (uintptr_t)packet->data_buf, packet->num_bytes); + return -EINVAL; + } + } +#endif + if (packet->dir == MSPI_TX || packet->num_bytes == 0) { imr = IMR_TXEIM_BIT; dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, @@ -1123,6 +1159,12 @@ static int start_next_packet(const struct device *dev) dev_data->xfer.tx_dummy); write_rxftlr(dev, 0); +#if defined(CONFIG_MSPI_DMA) + } else if (dev_data->xfer.xfer_mode == MSPI_DMA) { + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, CTRLR0_TMOD_RX); + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK, + dev_data->xfer.rx_dummy); +#endif } else { uint32_t tmod; uint8_t rx_fifo_threshold; @@ -1211,95 +1253,124 @@ static int start_next_packet(const struct device *dev) irq_unlock(key); } - dev_data->buf_pos = packet->data_buf; - dev_data->buf_end = &packet->data_buf[packet->num_bytes]; - - /* Set the TX FIFO threshold and its transmit start level. */ - if (packet->num_bytes) { - /* If there is some data to send/receive, set the threshold to - * the value configured for the driver instance and the start - * level to the maximum possible value (it will be updated later - * in tx_fifo() or tx_dummy_bytes() when TX is to be finished). - * This helps avoid a situation when the TX FIFO becomes empty - * before the transfer is complete and the SSI core finishes the - * transaction and deactivates the CE line. This could occur - * right before the data phase in enhanced SPI modes, when the - * clock stretching feature does not work yet, or in Standard - * SPI mode, where the clock stretching is not available at all. - */ - uint8_t start_level = dev_data->dummy_bytes != 0 - ? dev_config->max_queued_dummy_bytes - 1 - : dev_config->tx_fifo_depth_minus_1; +#if defined(CONFIG_MSPI_DMA) + if (dev_data->xfer.xfer_mode == MSPI_DMA) { + /* For DMA mode, set start level based on transfer length to prevent underflow */ + uint32_t total_transfer_bytes = packet->num_bytes + dev_data->xfer.addr_length + + dev_data->xfer.cmd_length; + uint32_t transfer_frames = total_transfer_bytes >> dev_data->bytes_per_frame_exp; - write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) | - FIELD_PREP(TXFTLR_TFT_MASK, - dev_config->tx_fifo_threshold)); + /* Use minimum of transfer length or FIFO depth, but at least 1 */ + uint8_t dma_start_level = MIN(transfer_frames - 1, + dev_config->tx_fifo_depth_minus_1); + + dma_start_level = (dma_start_level > 0 ? dma_start_level : 1); + + /* Only TXFTHR needs to be set to the minimum number of frames */ + write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, dma_start_level)); + write_dmatdlr(dev, FIELD_PREP(DMATDLR_DMATDL_MASK, dev_config->dma_tx_data_level)); + write_dmardlr(dev, FIELD_PREP(DMARDLR_DMARDL_MASK, dev_config->dma_rx_data_level)); + write_dmacr(dev, DMACR_TDMAE_BIT | DMACR_RDMAE_BIT); + write_imr(dev, 0); + write_ssienr(dev, SSIENR_SSIC_EN_BIT); + + vendor_specific_start_dma_xfer(dev); } else { - uint32_t total_tx_entries = 0; +#endif + /* PIO mode */ + dev_data->buf_pos = packet->data_buf; + dev_data->buf_end = &packet->data_buf[packet->num_bytes]; + /* Set the TX FIFO threshold and its transmit start level. 
*/ + if (packet->num_bytes) { + /* If there is some data to send/receive, set the threshold to + * the value configured for the driver instance and the start + * level to the maximum possible value (it will be updated later + * in tx_fifo() or tx_dummy_bytes() when TX is to be finished). + * This helps avoid a situation when the TX FIFO becomes empty + * before the transfer is complete and the SSI core finishes the + * transaction and deactivates the CE line. This could occur + * right before the data phase in enhanced SPI modes, when the + * clock stretching feature does not work yet, or in Standard + * SPI mode, where the clock stretching is not available at all. + */ + uint8_t start_level = dev_data->dummy_bytes != 0 + ? dev_config->max_queued_dummy_bytes - 1 + : dev_config->tx_fifo_depth_minus_1; - /* It the whole transfer is to contain only the command and/or - * address, set up the transfer to start right after entries - * for those appear in the TX FIFO, and the threshold to 0, - * so that the interrupt occurs when the TX FIFO gets emptied. - */ - if (dev_data->xfer.cmd_length) { - if (dev_data->standard_spi) { - total_tx_entries += dev_data->xfer.cmd_length; - } else { - total_tx_entries += 1; + write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) | + FIELD_PREP(TXFTLR_TFT_MASK, + dev_config->tx_fifo_threshold)); + + } else { + uint32_t total_tx_entries = 0; + + /* It the whole transfer is to contain only the command and/or + * address, set up the transfer to start right after entries + * for those appear in the TX FIFO, and the threshold to 0, + * so that the interrupt occurs when the TX FIFO gets emptied. + */ + if (dev_data->xfer.cmd_length) { + if (dev_data->standard_spi) { + total_tx_entries += dev_data->xfer.cmd_length; + } else { + total_tx_entries += 1; + } } - } - if (dev_data->xfer.addr_length) { - if (dev_data->standard_spi) { - total_tx_entries += dev_data->xfer.addr_length; - } else { - total_tx_entries += 1; + if (dev_data->xfer.addr_length) { + if (dev_data->standard_spi) { + total_tx_entries += dev_data->xfer.addr_length; + } else { + total_tx_entries += 1; + } } + + write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, + total_tx_entries - 1)); } - write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, - total_tx_entries - 1)); - } + /* Ensure that there will be no interrupt from the controller yet. */ + write_imr(dev, 0); + /* Enable the controller. This must be done before DR is written. */ + write_ssienr(dev, SSIENR_SSIC_EN_BIT); - /* Ensure that there will be no interrupt from the controller yet. */ - write_imr(dev, 0); - /* Enable the controller. This must be done before DR is written. */ - write_ssienr(dev, SSIENR_SSIC_EN_BIT); + /* Since the FIFO depth in SSI is always at least 8, it can be safely + * assumed that the command and address fields (max. 2 and 4 bytes, + * respectively) can be written here before the TX FIFO gets filled up. + */ + if (dev_data->standard_spi) { + if (dev_data->xfer.cmd_length) { + tx_control_field(dev, packet->cmd, + dev_data->xfer.cmd_length); + } - /* Since the FIFO depth in SSI is always at least 8, it can be safely - * assumed that the command and address fields (max. 2 and 4 bytes, - * respectively) can be written here before the TX FIFO gets filled up. 
- */ - if (dev_data->standard_spi) { - if (dev_data->xfer.cmd_length) { - tx_control_field(dev, packet->cmd, - dev_data->xfer.cmd_length); - } + if (dev_data->xfer.addr_length) { + tx_control_field(dev, packet->address, + dev_data->xfer.addr_length); + } + } else { + if (dev_data->xfer.cmd_length) { + write_dr(dev, packet->cmd); + } - if (dev_data->xfer.addr_length) { - tx_control_field(dev, packet->address, - dev_data->xfer.addr_length); - } - } else { - if (dev_data->xfer.cmd_length) { - write_dr(dev, packet->cmd); + if (dev_data->xfer.addr_length) { + write_dr(dev, packet->address); + } } - if (dev_data->xfer.addr_length) { - write_dr(dev, packet->address); + /* Prefill TX FIFO with any data we can */ + if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) { + imr = IMR_RXFIM_BIT; + } else if (packet->dir == MSPI_TX && packet->num_bytes) { + tx_data(dev, packet); } - } - /* Prefill TX FIFO with any data we can */ - if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) { - imr = IMR_RXFIM_BIT; - } else if (packet->dir == MSPI_TX && packet->num_bytes) { - tx_data(dev, packet); + /* Enable interrupts now and wait until the packet is done unless async. */ + write_imr(dev, imr); +#if defined(CONFIG_MSPI_DMA) } +#endif - /* Enable interrupts now */ - write_imr(dev, imr); /* Write SER to start transfer */ write_ser(dev, BIT(dev_data->dev_id->dev_idx)); @@ -1867,9 +1938,16 @@ static DEVICE_API(mspi, drv_api) = { DT_INST_PROP_OR(inst, rx_fifo_threshold, \ 1 * RX_FIFO_DEPTH(inst) / 8 - 1) +#define MSPI_DW_DMA_DATA_LEVELS(inst) \ + .dma_tx_data_level = \ + DT_INST_PROP_OR(inst, dma_transmit_data_level, 0), \ + .dma_rx_data_level = \ + DT_INST_PROP_OR(inst, dma_receive_data_level, 0) + #define MSPI_DW_INST(inst) \ PM_DEVICE_DT_INST_DEFINE(inst, dev_pm_action_cb); \ IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);)) \ + VENDOR_SPECIFIC_DATA_DEFINE(inst); \ static void irq_config##inst(void) \ { \ LISTIFY(DT_INST_NUM_IRQS(inst), \ @@ -1878,6 +1956,7 @@ static DEVICE_API(mspi, drv_api) = { static struct mspi_dw_data dev##inst##_data; \ static const struct mspi_dw_config dev##inst##_config = { \ MSPI_DW_MMIO_ROM_INIT(DT_DRV_INST(inst)), \ + .wrapper_regs = (void *)DT_INST_REG_ADDR(inst), \ .irq_config = irq_config##inst, \ .clock_frequency = MSPI_DW_CLOCK_FREQUENCY(inst), \ IF_ENABLED(CONFIG_PINCTRL, \ @@ -1885,6 +1964,8 @@ static DEVICE_API(mspi, drv_api) = { IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, ce_gpios), \ (MSPI_DW_CE_GPIOS(inst),)) \ MSPI_DW_FIFO_PROPS(inst), \ + IF_ENABLED(CONFIG_MSPI_DMA, (MSPI_DW_DMA_DATA_LEVELS(inst),)) \ + .vendor_specific_data = VENDOR_SPECIFIC_DATA_GET(inst), \ DEFINE_REG_ACCESS(inst) \ .sw_multi_periph = \ DT_INST_PROP(inst, software_multiperipheral), \ diff --git a/drivers/mspi/mspi_dw.h b/drivers/mspi/mspi_dw.h index 894bee40d6c..0d015ae3db0 100644 --- a/drivers/mspi/mspi_dw.h +++ b/drivers/mspi/mspi_dw.h @@ -173,6 +173,22 @@ #define XIP_WRITE_CTRL_FRF_QUAD 2UL #define XIP_WRITE_CTRL_FRF_OCTAL 3UL +/* DMACR - DMA Control Register */ +#define DMACR_ATW_MASK GENMASK(4, 3) +#define DMACR_ATW_1 0UL +#define DMACR_ATW_2 1UL +#define DMACR_ATW_4 2UL +#define DMACR_ATW_8 3UL +#define DMACR_IDMAE_BIT BIT(2) +#define DMACR_TDMAE_BIT BIT(1) +#define DMACR_RDMAE_BIT BIT(0) + +/* DMATDLR - DMA Transmit Data Level */ +#define DMATDLR_DMATDL_MASK GENMASK(3, 0) + +/* DMARDLR - DMA Receive Data Level */ +#define DMARDLR_DMARDL_MASK GENMASK(3, 0) + /* Register access helpers. 
 */
#define USES_AUX_REG(inst) + DT_INST_PROP(inst, aux_reg_enable)
#define AUX_REG_INSTANCES (0 DT_INST_FOREACH_STATUS_OKAY(USES_AUX_REG))

diff --git a/drivers/mspi/mspi_dw_vendor_specific.h b/drivers/mspi/mspi_dw_vendor_specific.h
index d32a53ac293..0b1fcf1ca7e 100644
--- a/drivers/mspi/mspi_dw_vendor_specific.h
+++ b/drivers/mspi/mspi_dw_vendor_specific.h
@@ -97,7 +97,221 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
 }
 #endif /* defined(CONFIG_MSPI_XIP) */
 
-#else
+#elif DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_qspi_v2)
+#include
+
+static inline void vendor_specific_init(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->EVENTS_CORE = 0;
+	preg->EVENTS_DMA.DONE = 0;
+
+	preg->INTENSET = BIT(QSPI_INTENSET_CORE_Pos)
+			 | BIT(QSPI_INTENSET_DMADONE_Pos);
+}
+
+static inline void vendor_specific_suspend(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->ENABLE = 0;
+}
+
+static inline void vendor_specific_resume(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->ENABLE = 1;
+}
+
+static inline void vendor_specific_irq_clear(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	preg->EVENTS_CORE = 0;
+	preg->EVENTS_DMA.DONE = 0;
+}
+
+/* DMA support */
+
+#define EVDMA_ATTR_LEN_Pos (0UL)
+#define EVDMA_ATTR_LEN_Msk (0x00FFFFFFUL)
+
+#define EVDMA_ATTR_ATTR_Pos (24UL)
+#define EVDMA_ATTR_ATTR_Msk (0x3FUL << EVDMA_ATTR_ATTR_Pos)
+
+#define EVDMA_ATTR_32AXI_Pos (30UL)
+#define EVDMA_ATTR_32AXI_Msk (0x1UL << EVDMA_ATTR_32AXI_Pos)
+
+#define EVDMA_ATTR_EVENTS_Pos (31UL)
+#define EVDMA_ATTR_EVENTS_Msk (0x1UL << EVDMA_ATTR_EVENTS_Pos)
+
+typedef enum {
+	EVDMA_BYTE_SWAP = 0,
+	EVDMA_JOBLIST = 1,
+	EVDMA_BUFFER_FILL = 2,
+	EVDMA_FIXED_ATTR = 3,
+	EVDMA_STATIC_ADDR = 4,
+	EVDMA_PLAIN_DATA_BUF_WR = 5,
+} EVDMA_ATTR_Type;
+
+/* Set up the EVDMA attribute with the following configuration */
+#define EVDMA_ATTRIBUTE (BIT(EVDMA_BYTE_SWAP) | BIT(EVDMA_JOBLIST) | \
+			 BIT(EVDMA_BUFFER_FILL) | BIT(EVDMA_FIXED_ATTR) | \
+			 BIT(EVDMA_STATIC_ADDR) | BIT(EVDMA_PLAIN_DATA_BUF_WR))
+
+typedef struct {
+	uint8_t *addr;
+	uint32_t attr;
+} EVDMA_JOB_Type;
+
+#define EVDMA_JOB(BUFFER, SIZE, ATTR) \
+	(EVDMA_JOB_Type) { .addr = (uint8_t *)BUFFER, .attr = (ATTR << EVDMA_ATTR_ATTR_Pos | SIZE) }
+#define EVDMA_NULL_JOB() \
+	(EVDMA_JOB_Type) { .addr = (uint8_t *)0, .attr = 0 }
+
+typedef struct {
+	EVDMA_JOB_Type *tx_job;
+	EVDMA_JOB_Type *rx_job;
+} QSPI_TRANSFER_LIST_Type;
+
+/* Number of jobs needed for a transmit transaction */
+#define MAX_NUM_JOBS 5
+
+/* Vendor-specific data structure for Nordic QSPI */
+typedef struct {
+	QSPI_TRANSFER_LIST_Type *transfer_list;
+	EVDMA_JOB_Type *joblist;
+} nordic_qspi_vendor_data_t;
+
+/* Static allocation macros for vendor-specific data */
+#define VENDOR_SPECIFIC_DATA_DEFINE(inst) \
+	static QSPI_TRANSFER_LIST_Type mspi_dw_##inst##_transfer_list; \
+	static EVDMA_JOB_Type mspi_dw_##inst##_joblist[MAX_NUM_JOBS]; \
+	static const nordic_qspi_vendor_data_t mspi_dw_##inst##_vendor_data = { \
+		.transfer_list = &mspi_dw_##inst##_transfer_list, \
+		.joblist = &mspi_dw_##inst##_joblist[0] \
+	};
+
+#define VENDOR_SPECIFIC_DATA_GET(inst) (void *)&mspi_dw_##inst##_vendor_data
+
+/* Temporarily hard-coded as not in the MDK yet */
+#define QSPI_TMOD_OFFSET (0x490UL)
+#define QSPI_TMOD_TX_AND_RX (0x0)
+#define QSPI_TMOD_TX_ONLY (0x1)
+#define QSPI_TMOD_RX_ONLY (0x2)
+
+static inline void vendor_specific_start_dma_xfer(const struct device *dev)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	const struct mspi_dw_config *config = dev->config;
+	const struct mspi_xfer_packet *packet =
+		&dev_data->xfer.packets[dev_data->packets_done];
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	/* Use vendor-specific data from config - stores job and transfer lists */
+	const nordic_qspi_vendor_data_t *vendor_data = (const nordic_qspi_vendor_data_t *)
+		config->vendor_specific_data;
+
+	QSPI_TRANSFER_LIST_Type *transfer_list = vendor_data->transfer_list;
+	EVDMA_JOB_Type *joblist = vendor_data->joblist;
+
+	int tmod = 0;
+	int job_idx = 0;
+
+	/* Set up the tx job pointer to the first job */
+	transfer_list->tx_job = &joblist[0];
+
+	/*
+	 * The command and address will always have a length of 4 from the DMA's
+	 * perspective. The QSPI peripheral will use the length of data specified
+	 * in the core registers.
+	 */
+	if (dev_data->xfer.cmd_length > 0) {
+		joblist[job_idx++] = EVDMA_JOB(&packet->cmd, 4, EVDMA_ATTRIBUTE);
+	}
+	if (dev_data->xfer.addr_length > 0) {
+		joblist[job_idx++] = EVDMA_JOB(&packet->address, 4, EVDMA_ATTRIBUTE);
+	}
+
+	if (packet->dir == MSPI_TX) {
+		preg->CONFIG.RXTRANSFERLENGTH = 0;
+
+		if (packet->num_bytes > 0) {
+			joblist[job_idx++] = EVDMA_JOB(packet->data_buf, packet->num_bytes,
+						       EVDMA_ATTRIBUTE);
+		}
+
+		/* Always terminate with a null job */
+		joblist[job_idx] = EVDMA_NULL_JOB();
+		/* rx_job is always EVDMA_NULL_JOB() for transmit */
+		transfer_list->rx_job = &joblist[job_idx];
+		tmod = QSPI_TMOD_TX_ONLY;
+	} else {
+		preg->CONFIG.RXTRANSFERLENGTH = ((packet->num_bytes + dev_data->xfer.addr_length +
+						  dev_data->xfer.cmd_length) >>
+						 dev_data->bytes_per_frame_exp) - 1;
+
+		/* If sending address or command while being configured as controller */
+		if (job_idx > 0 && config->op_mode == MSPI_OP_MODE_CONTROLLER) {
+			tmod = QSPI_TMOD_TX_AND_RX;
+
+			/* After command and address, set up the RX job for data */
+			joblist[job_idx++] = EVDMA_NULL_JOB();
+			transfer_list->rx_job = &joblist[job_idx];
+			joblist[job_idx++] = EVDMA_JOB(packet->data_buf, packet->num_bytes,
+						       EVDMA_ATTRIBUTE);
+			joblist[job_idx] = EVDMA_NULL_JOB();
+		} else {
+			/* Sending a command or address while configured as target isn't supported */
+			tmod = QSPI_TMOD_RX_ONLY;
+
+			transfer_list->rx_job = &joblist[0];
+			joblist[0] = EVDMA_JOB(packet->data_buf, packet->num_bytes,
+					       EVDMA_ATTRIBUTE);
+			joblist[1] = EVDMA_NULL_JOB();
+			transfer_list->tx_job = &joblist[1];
+		}
+	}
+
+	/*
+	 * In slave mode, a tmod register in the wrapper also needs to be set. Currently
+	 * the address is not in the MDK yet, so this is a temporary fix.
+	 */
+	uintptr_t tmod_addr = (uintptr_t)preg + QSPI_TMOD_OFFSET;
+
+	sys_write32(tmod, tmod_addr);
+
+	preg->CONFIG.TXBURSTLENGTH = config->tx_fifo_depth_minus_1 + 1
+				     - config->dma_tx_data_level;
+	preg->CONFIG.RXBURSTLENGTH = config->dma_rx_data_level + 1;
+	preg->DMA.CONFIG.LISTPTR = (uint32_t)transfer_list;
+
+	preg->TASKS_START = 1;
+}
+
+static inline bool vendor_specific_dma_accessible_check(const struct device *dev,
+							const uint8_t *data_buf)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	return nrf_dma_accessible_check(preg, data_buf);
+}
+
+static inline bool vendor_specific_read_dma_irq(const struct device *dev)
+{
+	const struct mspi_dw_config *config = dev->config;
+	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;
+
+	return (bool)preg->EVENTS_DMA.DONE;
+}
+
+#else /* Supply empty vendor specific macros for generic case */
+
 static inline void vendor_specific_init(const struct device *dev)
 {
 	ARG_UNUSED(dev);
@@ -134,4 +348,33 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
 
 	return 0;
 }
-#endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
+#if defined(CONFIG_MSPI_DMA)
+static inline void vendor_specific_start_dma_xfer(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+}
+
+static inline bool vendor_specific_dma_accessible_check(const struct device *dev,
+							const uint8_t *data_buf)
+{
+	ARG_UNUSED(dev);
+	ARG_UNUSED(data_buf);
+
+	return true;
+}
+
+static inline bool vendor_specific_read_dma_irq(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	return true;
+}
+#endif /* defined(CONFIG_MSPI_DMA) */
+#endif /* Empty vendor specific macros */
+
+/* Empty macros for generic case - no vendor-specific data */
+#ifndef VENDOR_SPECIFIC_DATA_DEFINE
+#define VENDOR_SPECIFIC_DATA_DEFINE(inst)
+#endif
+
+#ifndef VENDOR_SPECIFIC_DATA_GET
+#define VENDOR_SPECIFIC_DATA_GET(inst) (void *)NULL
+#endif

diff --git a/dts/bindings/mspi/snps,designware-ssi.yaml b/dts/bindings/mspi/snps,designware-ssi.yaml
index 69a2947f9ef..7677325493b 100644
--- a/dts/bindings/mspi/snps,designware-ssi.yaml
+++ b/dts/bindings/mspi/snps,designware-ssi.yaml
@@ -47,3 +47,20 @@ properties:
     description: |
       Number of entries in the RX FIFO above which the controller gets
      an RX interrupt. Maximum value is the RX FIFO depth - 1.
+
+  dma-transmit-data-level:
+    type: int
+    description: |
+      When in DMA mode, the transmit data level field controls the level at which a DMA request
+      is made by the transmit logic. A request to transmit is generated when the number of
+      valid data entries in the transmit FIFO is equal to or below this field value. Lower values
+      mean less frequent DMA triggers with larger bursts. Higher values mean more frequent,
+      smaller bursts (lower latency, higher overhead). Range: 0-15
+
+  dma-receive-data-level:
+    type: int
+    description: |
+      When in DMA mode, the receive data level field controls the level at which a DMA request
+      is made by the receive logic. A request to receive is generated when the number of
+      valid data entries in the receive FIFO is greater than this value. Lower values mean
+      more frequent DMA triggers and higher values mean larger, less frequent bursts.
+      Range: 0-15

From d1edd0c9950e648a24c0ccace40d6b01175fcf9e Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Thu, 23 Oct 2025 18:18:03 +0100
Subject: [PATCH 8/9] [nrf fromtree] drivers: mspi_dw: Edit core disable to after PM init

There is a scenario where a bus fault occurs because power management
is not initialised yet when the core is turned off via the SSIENR
register. Move the register write so that it happens after power
management initialisation.

Signed-off-by: David Jewsbury
(cherry picked from commit 4bf6a756280be4d90edf96f1a4c088c4620be41d)
---
 drivers/mspi/mspi_dw.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c
index d4700a99261..b52e0533e3a 100644
--- a/drivers/mspi/mspi_dw.c
+++ b/drivers/mspi/mspi_dw.c
@@ -1864,9 +1864,6 @@ static int dev_init(const struct device *dev)
 		}
 	}
 
-	/* Make sure controller is disabled. */
-	write_ssienr(dev, 0);
-
 #if defined(CONFIG_PINCTRL)
 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
 		rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_SLEEP);
@@ -1877,7 +1874,15 @@ static int dev_init(const struct device *dev)
 	}
 #endif
 
-	return pm_device_driver_init(dev, dev_pm_action_cb);
+	rc = pm_device_driver_init(dev, dev_pm_action_cb);
+	if (rc < 0) {
+		return rc;
+	}
+
+	/* Make sure controller is disabled. */
+	write_ssienr(dev, 0);
+
+	return 0;
 }
 
 static DEVICE_API(mspi, drv_api) = {

From f4225dd4ad72e7ed3112e416f94ab6b88f37077d Mon Sep 17 00:00:00 2001
From: David Jewsbury
Date: Thu, 23 Oct 2025 18:44:10 +0100
Subject: [PATCH 9/9] [nrf fromtree] drivers: mspi_dw: Add CONFIG_MSPI_DW_DDR

Dual data rate isn't always supported, so this Kconfig option
enables/disables the relevant code in the driver.

Signed-off-by: David Jewsbury
(cherry picked from commit 55af2824dc9ab25100f625c24d84cdbc489ecef4)
---
 drivers/mspi/Kconfig.dw | 9 +++++++++
 drivers/mspi/mspi_dw.c  | 4 ++++
 2 files changed, 13 insertions(+)

diff --git a/drivers/mspi/Kconfig.dw b/drivers/mspi/Kconfig.dw
index 71302a801fd..e967671e472 100644
--- a/drivers/mspi/Kconfig.dw
+++ b/drivers/mspi/Kconfig.dw
@@ -11,6 +11,13 @@ config MSPI_DW
 
 if MSPI_DW
 
+config MSPI_DW_DDR
+	bool "Dual Data-Rate (DDR) capabilities"
+	default y
+	help
+	  Dual data rate is supported by some devices and requires specific
+	  registers to be enabled in the IP.
+ config MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE bool "Handle FIFO in system workqueue" depends on MULTITHREADING @@ -23,6 +30,7 @@ config MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE config MSPI_DW_TXD_DIV int "Designware SSI TX Drive edge divisor" + depends on MSPI_DW_DDR default 4 help Division factor to apply to calculated BAUDR value when writing it @@ -31,6 +39,7 @@ config MSPI_DW_TXD_DIV config MSPI_DW_TXD_MUL int "Designware SSI TX Drive edge multiplier" + depends on MSPI_DW_DDR default 1 help Multiplication factor to apply to calculated BAUDR value when writing diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c index b52e0533e3a..4f6616a9174 100644 --- a/drivers/mspi/mspi_dw.c +++ b/drivers/mspi/mspi_dw.c @@ -937,6 +937,7 @@ static int _api_dev_config(const struct device *dev, switch (cfg->data_rate) { case MSPI_DATA_RATE_SINGLE: break; +#if defined(CONFIG_MSPI_DW_DDR) case MSPI_DATA_RATE_DUAL: dev_data->spi_ctrlr0 |= SPI_CTRLR0_INST_DDR_EN_BIT; /* Also need to set DDR_EN bit */ @@ -944,6 +945,7 @@ static int _api_dev_config(const struct device *dev, case MSPI_DATA_RATE_S_D_D: dev_data->spi_ctrlr0 |= SPI_CTRLR0_SPI_DDR_EN_BIT; break; +#endif default: LOG_ERR("Data rate %d not supported", cfg->data_rate); @@ -1238,6 +1240,7 @@ static int start_next_packet(const struct device *dev) write_spi_ctrlr0(dev, dev_data->spi_ctrlr0); write_baudr(dev, dev_data->baudr); write_rx_sample_dly(dev, dev_data->rx_sample_dly); +#if defined(CONFIG_MSPI_DW_DDR) if (dev_data->spi_ctrlr0 & (SPI_CTRLR0_SPI_DDR_EN_BIT | SPI_CTRLR0_INST_DDR_EN_BIT)) { int txd = (CONFIG_MSPI_DW_TXD_MUL * dev_data->baudr) / @@ -1247,6 +1250,7 @@ static int start_next_packet(const struct device *dev) } else { write_txd_drive_edge(dev, 0); } +#endif if (xip_enabled) { write_ssienr(dev, SSIENR_SSIC_EN_BIT);