diff --git a/boards/nxp/frdm_mcxc444/frdm_mcxc444.dts b/boards/nxp/frdm_mcxc444/frdm_mcxc444.dts
index 8a6da0a9764a9..c4b0c73fc20ae 100644
--- a/boards/nxp/frdm_mcxc444/frdm_mcxc444.dts
+++ b/boards/nxp/frdm_mcxc444/frdm_mcxc444.dts
@@ -191,3 +191,7 @@ zephyr_udc0: &usb {
 	status = "okay";
 	num-bidir-endpoints = <8>;
 };
+
+&dma {
+	status = "okay";
+};
diff --git a/boards/nxp/frdm_mcxc444/frdm_mcxc444.yaml b/boards/nxp/frdm_mcxc444/frdm_mcxc444.yaml
index 201eacecb179c..9c48a0fb14852 100644
--- a/boards/nxp/frdm_mcxc444/frdm_mcxc444.yaml
+++ b/boards/nxp/frdm_mcxc444/frdm_mcxc444.yaml
@@ -23,6 +23,7 @@ supported:
   - pwm
   - usb_device
   - usbd
+  - dma
 testing:
   ignore_tags:
     - net
diff --git a/drivers/clock_control/clock_control_mcux_sim.c b/drivers/clock_control/clock_control_mcux_sim.c
index 180f44fce783f..c5b0e224fe4da 100644
--- a/drivers/clock_control/clock_control_mcux_sim.c
+++ b/drivers/clock_control/clock_control_mcux_sim.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017,2025 NXP
+ * Copyright 2017, 2025 NXP
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -37,6 +37,16 @@ static int mcux_sim_on(const struct device *dev,
 	}
 #endif
 
+#if DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(dma))
+	if ((uint32_t)sub_system == KINETIS_SIM_DMA_CLK) {
+		clock_ip_name = kCLOCK_Dma0;
+	}
+
+	if ((uint32_t)sub_system == KINETIS_SIM_DMAMUX_CLK) {
+		clock_ip_name = kCLOCK_Dmamux0;
+	}
+#endif
+
 	CLOCK_EnableClock(clock_ip_name);
 
 	return 0;
diff --git a/drivers/dma/CMakeLists.txt b/drivers/dma/CMakeLists.txt
index 240e7f364e182..e9ccb7076696f 100644
--- a/drivers/dma/CMakeLists.txt
+++ b/drivers/dma/CMakeLists.txt
@@ -56,3 +56,4 @@ zephyr_library_sources_ifdef(CONFIG_DMA_WCH dma_wch.c)
 zephyr_library_sources_ifdef(CONFIG_DMA_TI_CC23X0 dma_ti_cc23x0.c)
 zephyr_library_sources_ifdef(CONFIG_DMA_NPCX_GDMA dma_npcx_gdma.c)
 zephyr_library_sources_ifdef(CONFIG_DMA_SF32LB dma_sf32lb.c)
+zephyr_library_sources_ifdef(CONFIG_DMA_NXP dma_nxp.c)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 47254e3ba3372..5de24db0c659c 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -103,4 +103,6 @@ source "drivers/dma/Kconfig.npcx"
 
 source "drivers/dma/Kconfig.sf32lb"
 
+source "drivers/dma/Kconfig.nxp"
+
 endif # DMA
diff --git a/drivers/dma/Kconfig.nxp b/drivers/dma/Kconfig.nxp
new file mode 100644
index 0000000000000..e31a77288729f
--- /dev/null
+++ b/drivers/dma/Kconfig.nxp
@@ -0,0 +1,7 @@
+# Copyright 2025 NXP
+# SPDX-License-Identifier: Apache-2.0
+
+config DMA_NXP
+	bool "NXP DMA driver"
+	default y
+	depends on DT_HAS_NXP_DMA_ENABLED
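Note: since DMA_NXP defaults to y whenever a devicetree node matching "nxp,dma" is enabled, a board that flips the node to status = "okay" (as the frdm_mcxc444.dts hunk above does) pulls the driver in automatically; an application only needs the existing DMA subsystem switch. A minimal sketch, assuming nothing beyond what this patch adds:

# prj.conf (sketch)
CONFIG_DMA=y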
diff --git a/drivers/dma/dma_nxp.c b/drivers/dma/dma_nxp.c
new file mode 100644
index 0000000000000..8061cc757a31c
--- /dev/null
+++ b/drivers/dma/dma_nxp.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2025 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <errno.h>
+#include <zephyr/device.h>
+#include <zephyr/drivers/clock_control.h>
+#include <zephyr/drivers/dma.h>
+#include <zephyr/irq.h>
+#include <zephyr/logging/log.h>
+#include <zephyr/sys/atomic.h>
+#include <zephyr/sys/barrier.h>
+#include <fsl_common.h>
+
+LOG_MODULE_REGISTER(nxp_dma, CONFIG_DMA_LOG_LEVEL);
+
+#define DT_DRV_COMPAT nxp_dma
+
+struct nxp_dma_chan_data {
+	const struct device *dev;
+	dma_callback_t cb;
+	void *user_data;
+	uint32_t width;
+	uint8_t src_inc;
+	uint8_t dst_inc;
+	enum dma_channel_direction dir;
+	bool busy;
+	uint16_t dmamux_source;
+};
+
+struct nxp_dma_data {
+	struct dma_context ctx;
+	struct nxp_dma_chan_data *chan;
+};
+
+struct nxp_dma_config {
+	DMA_Type *dma_base;
+	DMAMUX_Type *dmamux_base;
+	uint8_t num_channels;
+	uint8_t dma_requests;
+	void (*irq_config_func)(const struct device *dev);
+#if IS_ENABLED(CONFIG_CLOCK_CONTROL)
+	const struct device *dma_clk_dev;
+	clock_control_subsys_t dma_clk_subsys;
+	const struct device *dmamux_clk_dev;
+	clock_control_subsys_t dmamux_clk_subsys;
+#endif
+	/* Pointer to per-instance channel allocation bitmap */
+	atomic_t *channels_atomic;
+};
+
+static inline uint32_t nxp_dma_bytes_to_size_field(uint32_t bytes)
+{
+	switch (bytes) {
+	case 1:
+		return 1U; /* 8 bits */
+	case 2:
+		return 2U; /* 16 bits */
+	case 4:
+		return 0U; /* 32 bits */
+	default:
+		return 0U; /* default to 32 bits */
+	}
+}
+
+static inline void nxp_dma_reset_channel(DMA_Type *dma, uint32_t ch)
+{
+	/* Clear DSR_BCR[DONE] and reset registers to defaults */
+	dma->DMA[ch].DSR_BCR |= DMA_DSR_BCR_DONE_MASK;
+	dma->DMA[ch].SAR = 0U;
+	dma->DMA[ch].DAR = 0U;
+	dma->DMA[ch].DSR_BCR = 0U;
+	/* Enable auto stop request and cycle steal by default */
+	dma->DMA[ch].DCR = DMA_DCR_D_REQ_MASK | DMA_DCR_CS_MASK;
+}
+
+static int nxp_dma_configure(const struct device *dev, uint32_t channel,
+			     struct dma_config *config)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+	struct nxp_dma_chan_data *chan_data = &data->chan[channel];
+	DMAMUX_Type *mux = cfg->dmamux_base;
+	DMA_Type *dma = cfg->dma_base;
+	uint8_t src_inc, dst_inc;
+
+	if (channel >= cfg->num_channels || config == NULL ||
+	    config->head_block == NULL) {
+		return -EINVAL;
+	}
+
+	if (!((config->source_data_size == 1U) || (config->source_data_size == 2U) ||
+	      (config->source_data_size == 4U))) {
+		return -EINVAL;
+	}
+
+	if (config->dest_data_size != config->source_data_size) {
+		return -EINVAL;
+	}
+
+	/* Reset channel. */
+	nxp_dma_reset_channel(dma, channel);
+
+	/* Source and destination address increment. */
+	src_inc = (config->head_block->source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) ? 0U : 1U;
+	dst_inc = (config->head_block->dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) ? 0U : 1U;
+
+	/* Save channel settings */
+	chan_data->dev = dev;
+	chan_data->busy = false;
+	chan_data->src_inc = src_inc;
+	chan_data->dst_inc = dst_inc;
+	chan_data->cb = config->dma_callback;
+	chan_data->user_data = config->user_data;
+	chan_data->width = config->dest_data_size;
+	chan_data->dir = config->channel_direction;
+	chan_data->dmamux_source = (uint16_t)config->dma_slot;
+
+	/* Configure DCR register:
+	 * destination size, source size, destination increment, source increment.
+	 */
+	dma->DMA[channel].DCR &= ~(DMA_DCR_DSIZE_MASK | DMA_DCR_SSIZE_MASK |
+				   DMA_DCR_DINC_MASK | DMA_DCR_SINC_MASK |
+				   DMA_DCR_EINT_MASK | DMA_DCR_ERQ_MASK);
+
+	dma->DMA[channel].DCR |= (DMA_DCR_DSIZE(nxp_dma_bytes_to_size_field(chan_data->width)) |
+				  DMA_DCR_SSIZE(nxp_dma_bytes_to_size_field(chan_data->width)) |
+				  DMA_DCR_DINC((uint32_t)chan_data->dst_inc) |
+				  DMA_DCR_SINC((uint32_t)chan_data->src_inc));
+
+	/* Raise an interrupt when the transfer completes. */
+	if (config->complete_callback_en || chan_data->cb) {
+		dma->DMA[channel].DCR |= DMA_DCR_EINT_MASK;
+	}
+
+	/* P2P and P2M use HW request (ERQ = 1, CS = 1).
+	 * M2M and M2P use SW request (START = 1, ERQ = 0, CS = 0).
+	 */
+	if ((chan_data->dir == PERIPHERAL_TO_PERIPHERAL) ||
+	    (chan_data->dir == PERIPHERAL_TO_MEMORY)) {
+		dma->DMA[channel].DCR |= (DMA_DCR_ERQ_MASK | DMA_DCR_CS_MASK);
+	} else {
+		dma->DMA[channel].DCR &= ~(DMA_DCR_ERQ_MASK | DMA_DCR_CS_MASK);
+	}
+
+	/* Enable DMAMUX routing for P2P and P2M directions; disable it for
+	 * M2M and M2P.
+	 */
+	if (mux != NULL && (chan_data->dir == PERIPHERAL_TO_PERIPHERAL ||
+			    chan_data->dir == PERIPHERAL_TO_MEMORY)) {
+		if (config->dma_slot != 0U) {
+			mux->CHCFG[channel] = ((mux->CHCFG[channel] &
+						(uint8_t)~DMAMUX_CHCFG_SOURCE_MASK) |
+					       DMAMUX_CHCFG_SOURCE(config->dma_slot));
+			mux->CHCFG[channel] |= DMAMUX_CHCFG_ENBL_MASK;
+		}
+	} else if (mux != NULL) {
+		mux->CHCFG[channel] &= (uint8_t)~DMAMUX_CHCFG_ENBL_MASK;
+	}
+
+	const struct dma_block_config *blk = config->head_block;
+
+	/* Enforce alignment to width */
+	if (((blk->source_address % chan_data->width) != 0U) ||
+	    ((blk->dest_address % chan_data->width) != 0U)) {
+		return -EINVAL;
+	}
+
+	dma->DMA[channel].SAR = blk->source_address;
+	dma->DMA[channel].DAR = blk->dest_address;
+	dma->DMA[channel].DSR_BCR = DMA_DSR_BCR_BCR(blk->block_size);
+
+	return 0;
+}
+
+/**
+ * @note start() does not reprogram the SAR/DAR/BCR registers; it uses the
+ * values currently in hardware. These come from the most recent
+ * configure()/reload() call, or from the register state left by the last
+ * transfer: stop() preserves any remaining BCR count to support repeated
+ * starts, and SAR/DAR keep whatever position they advanced to.
+ */
+static int nxp_dma_start(const struct device *dev, uint32_t channel)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+	struct nxp_dma_chan_data *chan_data = &data->chan[channel];
+	DMA_Type *dma = cfg->dma_base;
+
+	if (channel >= cfg->num_channels) {
+		return -EINVAL;
+	}
+
+	if (chan_data->busy) {
+		return -EBUSY;
+	}
+
+	if ((dma->DMA[channel].DSR_BCR & DMA_DSR_BCR_BCR_MASK) == 0U) {
+		return -EINVAL;
+	}
+
+	chan_data->busy = true;
+
+	/* Start: either allow HW requests or issue a software start */
+	if ((dma->DMA[channel].DCR & DMA_DCR_ERQ_MASK) == 0U) {
+		dma->DMA[channel].DCR |= DMA_DCR_START_MASK;
+	}
+
+	return 0;
+}
+
+static int nxp_dma_stop(const struct device *dev, uint32_t channel)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+	struct nxp_dma_chan_data *chan_data = &data->chan[channel];
+	DMA_Type *dma = cfg->dma_base;
+
+	if (channel >= cfg->num_channels) {
+		return -EINVAL;
+	}
+
+	/* Disable ERQ to stop further HW requests */
+	dma->DMA[channel].DCR &= ~DMA_DCR_ERQ_MASK;
+
+	/* Disable DMAMUX channel. */
+	if (cfg->dmamux_base) {
+		cfg->dmamux_base->CHCFG[channel] &= (uint8_t)~DMAMUX_CHCFG_ENBL_MASK;
+	}
+
+	/* Capture the remaining BCR count and write it back after clearing
+	 * status, so a later start() can continue the transfer without another
+	 * configure()/reload(). SAR/DAR have advanced to the current position
+	 * and are intentionally left untouched to make the transfer resumable.
+	 */
+	uint32_t remain = (dma->DMA[channel].DSR_BCR & DMA_DSR_BCR_BCR_MASK) >>
+			  DMA_DSR_BCR_BCR_SHIFT;
+
+	/* Clear status/error bits; note this also clears BCR to 0 per RM */
+	dma->DMA[channel].DSR_BCR |= DMA_DSR_BCR_DONE_MASK;
+
+	if (remain != 0U) {
+		dma->DMA[channel].DSR_BCR = DMA_DSR_BCR_BCR(remain);
+	}
+
+	chan_data->busy = false;
+
+	return 0;
+}
+
+static int nxp_dma_reload(const struct device *dev, uint32_t channel,
+			  uint32_t src, uint32_t dst, size_t size)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+	DMA_Type *dma = cfg->dma_base;
+	struct nxp_dma_chan_data *chan_data = &data->chan[channel];
+
+	if (channel >= cfg->num_channels) {
+		return -EINVAL;
+	}
+
+	/* Alignment requirement: addresses aligned to the transfer width */
+	if ((src % chan_data->width) != 0U || (dst % chan_data->width) != 0U) {
+		return -EINVAL;
+	}
+
+	if (chan_data->busy) {
+		return -EBUSY;
+	}
+
+	/* Configure SAR/DAR and BCR */
+	dma->DMA[channel].SAR = src;
+	dma->DMA[channel].DAR = dst;
+	dma->DMA[channel].DSR_BCR = DMA_DSR_BCR_BCR(size);
+
+	return 0;
+}
+
+static int nxp_dma_get_status(const struct device *dev, uint32_t channel,
+			      struct dma_status *status)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+	DMA_Type *dma = cfg->dma_base;
+	struct nxp_dma_chan_data *chan_data = &data->chan[channel];
+
+	if (channel >= cfg->num_channels) {
+		return -EINVAL;
+	}
+
+	status->busy = ((dma->DMA[channel].DSR_BCR & DMA_DSR_BCR_BSY_MASK) != 0U) &&
+		       chan_data->busy;
+	status->pending_length = (dma->DMA[channel].DSR_BCR & DMA_DSR_BCR_BCR_MASK) >>
+				 DMA_DSR_BCR_BCR_SHIFT;
+	status->dir = chan_data->dir;
+
+	return 0;
+}
+
+static void nxp_dma_isr(const struct device *dev, uint32_t channel)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+	struct nxp_dma_chan_data *chan_data = &data->chan[channel];
+	DMA_Type *dma = cfg->dma_base;
+	int ret = DMA_STATUS_COMPLETE;
+
+	if (dma->DMA[channel].DSR_BCR & (DMA_DSR_BCR_BED_MASK |
+					 DMA_DSR_BCR_BES_MASK | DMA_DSR_BCR_CE_MASK)) {
+		ret = -EIO;
+	}
+
+	/* Clear DONE flags */
+	dma->DMA[channel].DSR_BCR |= DMA_DSR_BCR_DONE_MASK;
+
+	chan_data->busy = false;
+
+	if (chan_data->cb) {
+		chan_data->cb(dev, chan_data->user_data, channel, ret);
+	}
+
+	barrier_dsync_fence_full();
+}
+
+static int nxp_dma_init(const struct device *dev)
+{
+	const struct nxp_dma_config *cfg = dev->config;
+	struct nxp_dma_data *data = dev->data;
+
+#if IS_ENABLED(CONFIG_CLOCK_CONTROL)
+	int ret;
+
+	if (cfg->dma_clk_dev != NULL) {
+		if (!device_is_ready(cfg->dma_clk_dev)) {
+			LOG_ERR("DMA clock device not ready");
+			return -ENODEV;
+		}
+		ret = clock_control_on(cfg->dma_clk_dev, cfg->dma_clk_subsys);
+		if (ret < 0) {
+			LOG_ERR("Failed to enable DMA clock (%d)", ret);
+			return ret;
+		}
+	}
+
+	if (cfg->dmamux_clk_dev != NULL) {
+		if (!device_is_ready(cfg->dmamux_clk_dev)) {
+			LOG_ERR("DMAMUX clock device not ready");
+			return -ENODEV;
+		}
+		ret = clock_control_on(cfg->dmamux_clk_dev, cfg->dmamux_clk_subsys);
+		if (ret < 0) {
+			LOG_ERR("Failed to enable DMAMUX clock (%d)", ret);
+			return ret;
+		}
+	}
+#endif
+
+	data->ctx.magic = DMA_MAGIC;
+	data->ctx.atomic = cfg->channels_atomic;
+	data->ctx.dma_channels = cfg->num_channels;
+
+	/* Reset all channels */
+	for (uint32_t ch = 0; ch < cfg->num_channels; ch++) {
+		nxp_dma_reset_channel(cfg->dma_base, ch);
+
+		/* Disable DMAMUX channel if present */
+		if (cfg->dmamux_base) {
+			cfg->dmamux_base->CHCFG[ch] &= (uint8_t)~DMAMUX_CHCFG_ENBL_MASK;
+		}
+
+		data->chan[ch].busy = false;
+		data->chan[ch].cb = NULL;
+		data->chan[ch].user_data = NULL;
+	}
+
+	cfg->irq_config_func(dev);
+
+	return 0;
+}
+
+static DEVICE_API(dma, nxp_dma_api) = {
+	.config = nxp_dma_configure,
+	.start = nxp_dma_start,
+	.stop = nxp_dma_stop,
+	.reload = nxp_dma_reload,
+	.get_status = nxp_dma_get_status,
+};
+
+/* IRQ dispatcher per channel */
+#define NXP_DMA_DECLARE_IRQ(inst, ch) \
+	static void _CONCAT(_CONCAT(nxp_dma_irq, inst), ch)(const struct device *dev) \
+	{ \
+		nxp_dma_isr(dev, ch); \
+	}
+
+/* Per-instance macro: connect each channel IRQ by index. */
+#define NXP_DMA_IRQ_CFG_FUNC(inst) \
+	static void _CONCAT(nxp_dma_irq_config_func, inst)(const struct device *dev) \
+	{ \
+		IF_ENABLED(DT_INST_IRQ_HAS_IDX(inst, 0), ( \
+			IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 0, irq), \
+				    DT_INST_IRQ_BY_IDX(inst, 0, priority), \
+				    _CONCAT(_CONCAT(nxp_dma_irq, inst), 0), \
+				    DEVICE_DT_INST_GET(inst), 0); \
+			irq_enable(DT_INST_IRQ_BY_IDX(inst, 0, irq)); \
+		)) \
+		IF_ENABLED(DT_INST_IRQ_HAS_IDX(inst, 1), ( \
+			IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 1, irq), \
+				    DT_INST_IRQ_BY_IDX(inst, 1, priority), \
+				    _CONCAT(_CONCAT(nxp_dma_irq, inst), 1), \
+				    DEVICE_DT_INST_GET(inst), 0); \
+			irq_enable(DT_INST_IRQ_BY_IDX(inst, 1, irq)); \
+		)) \
+		IF_ENABLED(DT_INST_IRQ_HAS_IDX(inst, 2), ( \
+			IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 2, irq), \
+				    DT_INST_IRQ_BY_IDX(inst, 2, priority), \
+				    _CONCAT(_CONCAT(nxp_dma_irq, inst), 2), \
+				    DEVICE_DT_INST_GET(inst), 0); \
+			irq_enable(DT_INST_IRQ_BY_IDX(inst, 2, irq)); \
+		)) \
+		IF_ENABLED(DT_INST_IRQ_HAS_IDX(inst, 3), ( \
+			IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 3, irq), \
+				    DT_INST_IRQ_BY_IDX(inst, 3, priority), \
+				    _CONCAT(_CONCAT(nxp_dma_irq, inst), 3), \
+				    DEVICE_DT_INST_GET(inst), 0); \
+			irq_enable(DT_INST_IRQ_BY_IDX(inst, 3, irq)); \
+		)) \
+	}
+
+#if IS_ENABLED(CONFIG_CLOCK_CONTROL)
+#define NXP_DMA_CLOCKS_INIT(inst) \
+	.dma_clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR_BY_IDX(inst, 0)), \
+	.dma_clk_subsys = \
+		(clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_IDX(inst, 0, name), \
+	.dmamux_clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR_BY_IDX(inst, 1)), \
+	.dmamux_clk_subsys = \
+		(clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_IDX(inst, 1, name),
+#else
+#define NXP_DMA_CLOCKS_INIT(inst)
+#endif
+
+#define NXP_DMA_INIT(inst) \
+	NXP_DMA_DECLARE_IRQ(inst, 0) \
+	NXP_DMA_DECLARE_IRQ(inst, 1) \
+	NXP_DMA_DECLARE_IRQ(inst, 2) \
+	NXP_DMA_DECLARE_IRQ(inst, 3) \
+	NXP_DMA_IRQ_CFG_FUNC(inst) \
+	\
+	ATOMIC_DEFINE(_CONCAT(nxp_dma_atomic, inst), DT_INST_PROP(inst, dma_channels)); \
+	\
+	static struct nxp_dma_chan_data \
+		_CONCAT(nxp_dma_chan_data, inst)[DT_INST_PROP(inst, dma_channels)]; \
+	\
+	static struct nxp_dma_data _CONCAT(nxp_dma_runtime, inst) = { \
+		.chan = _CONCAT(nxp_dma_chan_data, inst), \
+	}; \
+	\
+	static const struct nxp_dma_config _CONCAT(nxp_dma_config, inst) = { \
+		.dma_base = (DMA_Type *)DT_INST_REG_ADDR_BY_IDX(inst, 0), \
+		.dmamux_base = (DMAMUX_Type *)DT_INST_REG_ADDR_BY_IDX(inst, 1), \
+		.num_channels = DT_INST_PROP(inst, dma_channels), \
+		.dma_requests = DT_INST_PROP(inst, dma_requests), \
+		.irq_config_func = _CONCAT(nxp_dma_irq_config_func, inst), \
+		NXP_DMA_CLOCKS_INIT(inst) \
+		.channels_atomic = _CONCAT(nxp_dma_atomic, inst), \
+	}; \
+	\
+	DEVICE_DT_INST_DEFINE(inst, nxp_dma_init, NULL, \
+			      &_CONCAT(nxp_dma_runtime, inst), \
+			      &_CONCAT(nxp_dma_config, inst), PRE_KERNEL_1, \
+			      CONFIG_DMA_INIT_PRIORITY, &nxp_dma_api);
+
+DT_INST_FOREACH_STATUS_OKAY(NXP_DMA_INIT)
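For review context, a sketch of exercising the driver through the generic Zephyr DMA API: a memory-to-memory copy, which takes the software-start path (DCR[START] in nxp_dma_configure()/nxp_dma_start() above) rather than a DMAMUX hardware request. Buffer names, sizes, and the use of channel 0 are illustrative, not part of this patch:

/* Hypothetical caller, e.g. an application or test. */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static uint32_t src_buf[64];
static uint32_t dst_buf[64];

static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	/* status is DMA_STATUS_COMPLETE (0), or -EIO on a bus/config error */
}

int start_m2m_copy(void)
{
	const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma));
	struct dma_block_config blk = {
		.source_address = (uint32_t)src_buf,
		.dest_address = (uint32_t)dst_buf,
		.block_size = sizeof(src_buf),
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,
		.dest_data_size = 4,
		.block_count = 1,
		.head_block = &blk,
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma_dev, 0, &cfg);

	if (ret < 0) {
		return ret;
	}
	return dma_start(dma_dev, 0);
}

A peripheral-to-memory transfer would instead set channel_direction = PERIPHERAL_TO_MEMORY and pass the DMAMUX request source in dma_slot, which enables the hardware-request (ERQ) path.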
diff --git a/dts/arm/nxp/nxp_mcxc_common.dtsi b/dts/arm/nxp/nxp_mcxc_common.dtsi
index b7f92435ee1d0..fc01b3fd08c86 100644
--- a/dts/arm/nxp/nxp_mcxc_common.dtsi
+++ b/dts/arm/nxp/nxp_mcxc_common.dtsi
@@ -362,6 +362,19 @@
 					NXP_VREF_MODE_LOW_POWER
 					NXP_VREF_MODE_HIGH_POWER>;
 		};
+
+		dma: dma-controller@40008000 {
+			compatible = "nxp,dma";
+			#dma-cells = <2>;
+			dma-channels = <4>;
+			dma-requests = <64>;
+			reg = <0x40008000 0x1000>,
+			      <0x40021000 0x1000>;
+			clocks = <&sim KINETIS_SIM_DMA_CLK 0 0>,
+				 <&sim KINETIS_SIM_DMAMUX_CLK 0 0>;
+			interrupts = <0 0>, <1 0>, <2 0>, <3 0>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/dts/bindings/dma/nxp,dma.yaml b/dts/bindings/dma/nxp,dma.yaml
new file mode 100644
index 0000000000000..9c0c119fca26b
--- /dev/null
+++ b/dts/bindings/dma/nxp,dma.yaml
@@ -0,0 +1,45 @@
+# Copyright 2025 NXP
+# SPDX-License-Identifier: Apache-2.0
+
+description: NXP DMA controller (DMA + DMAMUX)
+
+compatible: "nxp,dma"
+
+include: dma-controller.yaml
+
+properties:
+  reg:
+    required: true
+    description: |
+      Base address and size of the DMA engine and of the DMAMUX that routes
+      DMA request sources. Provide two regions: index 0 is the DMA engine,
+      index 1 is the DMAMUX.
+
+  interrupts:
+    required: true
+    description: |
+      Interrupts for the DMA channels, one entry per channel.
+
+  dma-channels:
+    required: true
+    description: |
+      Number of DMA channels supported by the controller. This value is used
+      to validate the channel number provided in the DMA specifier.
+
+  dma-requests:
+    required: true
+    description: |
+      Maximum number of DMA request sources (slots) supported by the DMAMUX.
+      This value is used to validate the request source index provided in
+      the DMA specifier.
+
+  "#dma-cells":
+    type: int
+    required: true
+    const: 2
+    description: |
+      Number of cells in a DMA specifier: the channel number and the DMA
+      request source.
+
+dma-cells:
+  - channel
+  - source
diff --git a/tests/drivers/dma/loop_transfer/boards/frdm_mcxc444.conf b/tests/drivers/dma/loop_transfer/boards/frdm_mcxc444.conf
new file mode 100644
index 0000000000000..a1e9e76dcaaa1
--- /dev/null
+++ b/tests/drivers/dma/loop_transfer/boards/frdm_mcxc444.conf
@@ -0,0 +1,4 @@
+# Copyright 2025 NXP
+# SPDX-License-Identifier: Apache-2.0
+
+CONFIG_DMA_LOOP_TRANSFER_SIZE=1024
diff --git a/tests/drivers/dma/loop_transfer/boards/frdm_mcxc444.overlay b/tests/drivers/dma/loop_transfer/boards/frdm_mcxc444.overlay
new file mode 100644
index 0000000000000..d696cf5e44937
--- /dev/null
+++ b/tests/drivers/dma/loop_transfer/boards/frdm_mcxc444.overlay
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2025 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+tst_dma0: &dma {
+
+};
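With the two-cell specifier defined above (channel, then request source), a DMA client would be wired up along these lines. This fragment is illustrative only: the lpuart0 node and source numbers are placeholders, the real DMAMUX slot assignments come from the MCXC444 reference manual, and nothing in this patch hooks a peripheral up to DMA:

&lpuart0 {
	/* cell 0: DMA channel, cell 1: DMAMUX request source */
	dmas = <&dma 0 2>, <&dma 1 3>;
	dma-names = "rx", "tx";
};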