diff --git a/drivers/dma/Kconfig.mcux_edma b/drivers/dma/Kconfig.mcux_edma
index 9b3d0c9e0fbcb..4a618aa9f7ac7 100644
--- a/drivers/dma/Kconfig.mcux_edma
+++ b/drivers/dma/Kconfig.mcux_edma
@@ -44,6 +44,8 @@ config DMA_MCUX_TEST_SLOT_START
 
 config DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS
 	bool "Use DTCM for DMA descriptors"
+	default y
+	depends on DT_HAS_NXP_IMX_DTCM_ENABLED
 	help
 	  When this option is activated, the descriptors for DMA transfer are
 	  located in the DTCM (Data Tightly Coupled Memory).
diff --git a/drivers/dma/dma_mcux_edma.c b/drivers/dma/dma_mcux_edma.c
index e4115d10b1e41..eede99c1eb36e 100644
--- a/drivers/dma/dma_mcux_edma.c
+++ b/drivers/dma/dma_mcux_edma.c
@@ -92,6 +92,14 @@ struct dma_mcux_channel_transfer_edma_settings {
 	enum dma_channel_direction direction;
 	edma_transfer_type_t transfer_type;
 	bool valid;
+	/* This flag selects between dynamic SG mode and loop (cyclic) SG mode. */
+	bool cyclic;
+	/* These parameters are for cyclic mode only.
+	 * Next empty TCD index which can be used for a transfer.
+	 */
+	volatile int8_t write_idx;
+	/* Number of empty TCDs in the TCD pool (free for writing transfer parameters). */
+	volatile uint8_t empty_tcds;
 };
 
@@ -130,6 +138,35 @@ struct dma_mcux_edma_data {
 	(ch % DEV_CFG(dev)->channels_per_mux) ^ (DEV_CFG(dev)->dmamux_reg_offset)
 #endif
 
+/* Definitions for SW TCD fields */
+#if defined(CONFIG_DMA_MCUX_EDMA) || defined(CONFIG_DMA_MCUX_EDMA_V3)
+#define EDMA_TCD_SADDR(tcd, flag)     ((tcd)->SADDR)
+#define EDMA_TCD_DADDR(tcd, flag)     ((tcd)->DADDR)
+#define EDMA_TCD_BITER(tcd, flag)     ((tcd)->BITER)
+#define EDMA_TCD_CITER(tcd, flag)     ((tcd)->CITER)
+#define EDMA_TCD_CSR(tcd, flag)       ((tcd)->CSR)
+#define EDMA_TCD_DLAST_SGA(tcd, flag) ((tcd)->DLAST_SGA)
+#define EDMA_HW_TCD_CH_ACTIVE_MASK    (DMA_CSR_ACTIVE_MASK)
+#elif defined(CONFIG_DMA_MCUX_EDMA_V4)
+/* The above macros are already defined in fsl_edma_core.h */
+#define EDMA_HW_TCD_CH_ACTIVE_MASK    (DMA_CH_CSR_ACTIVE_MASK)
+#endif
+
+/* Definitions for HW TCD fields */
+#ifdef CONFIG_DMA_MCUX_EDMA
+#define EDMA_HW_TCD_SADDR(dev, ch) (DEV_BASE(dev)->TCD[ch].SADDR)
+#define EDMA_HW_TCD_DADDR(dev, ch) (DEV_BASE(dev)->TCD[ch].DADDR)
+#define EDMA_HW_TCD_BITER(dev, ch) (DEV_BASE(dev)->TCD[ch].BITER_ELINKNO)
+#define EDMA_HW_TCD_CITER(dev, ch) (DEV_BASE(dev)->TCD[ch].CITER_ELINKNO)
+#define EDMA_HW_TCD_CSR(dev, ch)   (DEV_BASE(dev)->TCD[ch].CSR)
+#elif defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
+#define EDMA_HW_TCD_SADDR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_SADDR)
+#define EDMA_HW_TCD_DADDR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_DADDR)
+#define EDMA_HW_TCD_BITER(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_BITER_ELINKNO)
+#define EDMA_HW_TCD_CITER(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_CITER_ELINKNO)
+#define EDMA_HW_TCD_CSR(dev, ch)   (DEV_BASE(dev)->CH[ch].TCD_CSR)
+#endif
+
 /*
  * The hardware channel (takes the gap into account) is used when access DMA registers.
  * For data structures in the shim driver still use the primitive channel.
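The macro layer above exists because eDMA v1 exposes a channel's TCD registers as `base->TCD[ch].CSR`, while v3/v4 nest them as `base->CH[ch].TCD_CSR`; one macro name per field lets the rest of the driver stay version-agnostic. A compilable toy model of the same trick (not driver code; field widths, array sizes, and the `EDMA_V1_LAYOUT` switch are placeholders):

```c
/* Toy model of the register-layout difference the EDMA_HW_TCD_* macros hide. */
#include <stdint.h>

struct edma_v1_regs {
	struct { uint16_t CSR; } TCD[32];  /* v1: flat TCD array */
};

struct edma_v34_regs {
	struct { uint16_t TCD_CSR; } CH[32]; /* v3/v4: TCD nested per channel */
};

#ifdef EDMA_V1_LAYOUT
#define HW_TCD_CSR(base, ch) ((base)->TCD[ch].CSR)
#else
#define HW_TCD_CSR(base, ch) ((base)->CH[ch].TCD_CSR)
#endif
```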
@@ -178,10 +215,14 @@ static void nxp_edma_callback(edma_handle_t *handle, void *param, bool transferDone,
 {
 	int ret = -EIO;
 	struct call_back *data = (struct call_back *)param;
-	uint32_t channel = dma_mcux_edma_remove_channel_gap(data->dev, handle->channel);
 
-	if (transferDone) {
+	if (data->transfer_settings.cyclic) {
+		data->transfer_settings.empty_tcds++;
+		/* In loop mode, the DMA is always busy */
+		data->busy = 1;
+		ret = DMA_STATUS_COMPLETE;
+	} else if (transferDone) {
 		/* DMA is no longer busy when there are no remaining TCDs to transfer */
 		data->busy = (handle->tcdPool != NULL) && (handle->tcdUsed > 0);
 		ret = DMA_STATUS_COMPLETE;
@@ -254,6 +295,7 @@ static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
 	edma_transfer_type_t transfer_type;
 	unsigned int key;
 	int ret = 0;
+	edma_tcd_t *tcd = NULL;
 
 	if (slot >= DEV_CFG(dev)->dma_requests) {
 		LOG_ERR("source number is out of scope %d", slot);
@@ -316,7 +358,7 @@ static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
 	data->transfer_settings.direction = config->channel_direction;
 	data->transfer_settings.transfer_type = transfer_type;
 	data->transfer_settings.valid = true;
-
+	data->transfer_settings.cyclic = config->cyclic;
 
 	/* Lock and page in the channel configuration */
 	key = irq_lock();
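In cyclic mode the callback and `dma_mcux_edma_reload()` form a producer/consumer pair around the TCD pool: reload claims the entry at `write_idx` and decrements `empty_tcds`, while each major-loop interrupt returns one entry to the pool. A minimal self-contained model of that bookkeeping (the type and queue size are placeholders, not driver code):

```c
#include <stdbool.h>
#include <stdint.h>

#define TCD_QUEUE_SIZE 4 /* stands in for CONFIG_DMA_TCD_QUEUE_SIZE */

struct tcd_ring {
	volatile int8_t write_idx;   /* next TCD to fill */
	volatile uint8_t empty_tcds; /* TCDs free for reuse */
};

/* What dma_mcux_edma_reload() does: claim the next free TCD slot. */
static bool ring_claim(struct tcd_ring *r)
{
	if (r->empty_tcds == 0) {
		return false; /* reload would return -ENOBUFS here */
	}
	r->write_idx = (r->write_idx + 1) % TCD_QUEUE_SIZE;
	r->empty_tcds--;
	return true;
}

/* What nxp_edma_callback() does in cyclic mode: return one TCD. */
static void ring_complete(struct tcd_ring *r)
{
	r->empty_tcds++;
}
```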
@@ -357,26 +399,93 @@ static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
 	LOG_DBG("channel is %d", channel);
 	EDMA_EnableChannelInterrupts(DEV_BASE(dev), hw_channel, kEDMA_ErrorInterruptEnable);
 
+	/* Initialize the whole TCD pool to 0 */
+	for (int i = 0; i < CONFIG_DMA_TCD_QUEUE_SIZE; i++) {
+		memset(&DEV_CFG(dev)->tcdpool[channel][i], 0,
+		       sizeof(DEV_CFG(dev)->tcdpool[channel][i]));
+	}
+
 	if (block_config->source_gather_en || block_config->dest_scatter_en) {
-		EDMA_InstallTCDMemory(p_handle, DEV_CFG(dev)->tcdpool[channel],
-				      CONFIG_DMA_TCD_QUEUE_SIZE);
-		while (block_config != NULL) {
+		if (config->cyclic) {
+			/* Loop SG mode */
+			data->transfer_settings.write_idx = 0;
+			data->transfer_settings.empty_tcds = CONFIG_DMA_TCD_QUEUE_SIZE;
+
 			EDMA_PrepareTransfer(
-				&(data->transferConfig),
-				(void *)block_config->source_address,
-				config->source_data_size,
-				(void *)block_config->dest_address,
-				config->dest_data_size,
-				config->source_burst_length,
+				&data->transferConfig, (void *)block_config->source_address,
+				config->source_data_size, (void *)block_config->dest_address,
+				config->dest_data_size, config->source_burst_length,
 				block_config->block_size, transfer_type);
 
-			const status_t submit_status =
-				EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
-			if (submit_status != kStatus_Success) {
-				LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
-				ret = -EFAULT;
+			/* Initialize all TCDs with the parameters in the transfer
+			 * config and link them.
+			 */
+			for (int i = 0; i < CONFIG_DMA_TCD_QUEUE_SIZE; i++) {
+				EDMA_TcdSetTransferConfig(
+					&DEV_CFG(dev)->tcdpool[channel][i], &data->transferConfig,
+					&DEV_CFG(dev)->tcdpool[channel][(i + 1) %
+									CONFIG_DMA_TCD_QUEUE_SIZE]);
+
+				/* Enable the major loop interrupt. */
+				EDMA_TcdEnableInterrupts(&DEV_CFG(dev)->tcdpool[channel][i],
+							 kEDMA_MajorInterruptEnable);
+			}
+
+			/* Load valid transfer parameters */
+			while (block_config != NULL && data->transfer_settings.empty_tcds > 0) {
+				tcd = &(DEV_CFG(dev)->tcdpool[channel]
+						[data->transfer_settings.write_idx]);
+
+				EDMA_TCD_SADDR(tcd, kEDMA_EDMA4Flag) = block_config->source_address;
+				EDMA_TCD_DADDR(tcd, kEDMA_EDMA4Flag) = block_config->dest_address;
+				EDMA_TCD_BITER(tcd, kEDMA_EDMA4Flag) =
+					block_config->block_size / config->source_data_size;
+				EDMA_TCD_CITER(tcd, kEDMA_EDMA4Flag) =
+					block_config->block_size / config->source_data_size;
+				/* Enable auto stop for the last transfer. */
+				if (block_config->next_block == NULL) {
+					EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) |= DMA_CSR_DREQ(1U);
+				} else {
+					EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) &= ~DMA_CSR_DREQ(1U);
+				}
+
+				data->transfer_settings.write_idx =
+					(data->transfer_settings.write_idx + 1) %
+					CONFIG_DMA_TCD_QUEUE_SIZE;
+				data->transfer_settings.empty_tcds--;
+				block_config = block_config->next_block;
+			}
+
+			if (block_config != NULL && data->transfer_settings.empty_tcds == 0) {
+				/* More blocks were requested than the TCD pool can hold */
+				LOG_ERR("Too many requested blocks, increase the TCD buffer size!");
+				ret = -ENOBUFS;
+			}
+			/* Push the first TCD into HW */
+			EDMA_InstallTCD(p_handle->base, hw_channel,
+					&DEV_CFG(dev)->tcdpool[channel][0]);
+
+		} else {
+			/* Dynamic scatter-gather mode */
+			EDMA_InstallTCDMemory(p_handle, DEV_CFG(dev)->tcdpool[channel],
+					      CONFIG_DMA_TCD_QUEUE_SIZE);
+
+			while (block_config != NULL) {
+				EDMA_PrepareTransfer(&(data->transferConfig),
+						     (void *)block_config->source_address,
+						     config->source_data_size,
+						     (void *)block_config->dest_address,
+						     config->dest_data_size,
+						     config->source_burst_length,
+						     block_config->block_size, transfer_type);
+
+				const status_t submit_status =
+					EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
+				if (submit_status != kStatus_Success) {
+					LOG_ERR("Error submitting EDMA Transfer: 0x%x",
						submit_status);
+					ret = -EFAULT;
+				}
+				block_config = block_config->next_block;
 			}
-			block_config = block_config->next_block;
 		}
 	} else {
 		/* block_count shall be 1 */
@@ -395,11 +504,8 @@ static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
 			LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
 			ret = -EFAULT;
 		}
-#if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
-		LOG_DBG("DMA TCD_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].TCD_CSR);
-#else
-		LOG_DBG("data csr is 0x%x", DEV_BASE(dev)->TCD[hw_channel].CSR);
-#endif
+
+		LOG_DBG("DMA TCD CSR 0x%x", EDMA_HW_TCD_CSR(dev, hw_channel));
 	}
 
 	if (config->dest_chaining_en) {
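The loop-SG setup above links every pool entry to entry `(i + 1) % CONFIG_DMA_TCD_QUEUE_SIZE`, so the descriptor chain wraps back to the start and the engine can run through the pool indefinitely while software refills consumed entries; only the last valid descriptor keeps DREQ set so the channel halts there. A simplified sketch of that linking rule, with a toy struct standing in for `edma_tcd_t`:

```c
#include <stdbool.h>

#define TCD_QUEUE_SIZE 4 /* stands in for CONFIG_DMA_TCD_QUEUE_SIZE */

struct toy_tcd {
	struct toy_tcd *dlast_sga; /* scatter-gather link, as in the HW TCD */
	bool dreq;                 /* stop after this descriptor? */
};

static struct toy_tcd pool[TCD_QUEUE_SIZE];

/* Link the pool into a ring, then mark the first `valid` descriptors so
 * that only the last one halts the channel; the rest chain through.
 */
static void build_ring(int valid)
{
	for (int i = 0; i < TCD_QUEUE_SIZE; i++) {
		pool[i].dlast_sga = &pool[(i + 1) % TCD_QUEUE_SIZE];
	}
	for (int i = 0; i < valid; i++) {
		pool[i].dreq = (i == valid - 1);
	}
}
```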
@@ -491,11 +597,23 @@ static int dma_mcux_edma_resume(const struct device *dev, uint32_t channel)
 	return 0;
 }
 
+static void dma_mcux_edma_update_hw_tcd(const struct device *dev, uint32_t channel, uint32_t src,
+					uint32_t dst, size_t size)
+{
+	EDMA_HW_TCD_SADDR(dev, channel) = src;
+	EDMA_HW_TCD_DADDR(dev, channel) = dst;
+	EDMA_HW_TCD_BITER(dev, channel) = size;
+	EDMA_HW_TCD_CITER(dev, channel) = size;
+	EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_DREQ(1U);
+}
+
 static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
 				uint32_t src, uint32_t dst, size_t size)
 {
 	struct call_back *data =
 		DEV_CHANNEL_DATA(dev, channel);
+	edma_tcd_t *tcd = NULL;
+	edma_tcd_t *pre_tcd = NULL;
+	uint32_t hw_id, sw_id;
 
 	/* Lock the channel configuration */
 	const unsigned int key = irq_lock();
@@ -507,29 +625,115 @@ static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
 		goto cleanup;
 	}
 
-	/* If the tcdPool is not in use (no s/g) then only a single TCD can be active at once. */
-	if (data->busy && data->edma_handle.tcdPool == NULL) {
-		LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
-		ret = -EBUSY;
-		goto cleanup;
-	}
+	if (data->transfer_settings.cyclic) {
+		if (data->transfer_settings.empty_tcds == 0) {
+			LOG_ERR("TCD list is full in loop mode.");
+			ret = -ENOBUFS;
+			goto cleanup;
+		}
 
-	EDMA_PrepareTransfer(
-		&(data->transferConfig),
-		(void *)src,
-		data->transfer_settings.source_data_size,
-		(void *)dst,
-		data->transfer_settings.dest_data_size,
-		data->transfer_settings.source_burst_length,
-		size,
-		data->transfer_settings.transfer_type);
-
-	const status_t submit_status =
-		EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));
-
-	if (submit_status != kStatus_Success) {
-		LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
-		ret = -EFAULT;
+		/* Convert size into the major loop count */
+		size = size / data->transfer_settings.dest_data_size;
+
+		/* Configure a TCD for the transfer */
+		tcd = &(DEV_CFG(dev)->tcdpool[channel][data->transfer_settings.write_idx]);
+		pre_tcd = &(DEV_CFG(dev)->tcdpool[channel][(data->transfer_settings.write_idx - 1) %
+							   CONFIG_DMA_TCD_QUEUE_SIZE]);
+
+		EDMA_TCD_SADDR(tcd, kEDMA_EDMA4Flag) = src;
+		EDMA_TCD_DADDR(tcd, kEDMA_EDMA4Flag) = dst;
+		EDMA_TCD_BITER(tcd, kEDMA_EDMA4Flag) = size;
+		EDMA_TCD_CITER(tcd, kEDMA_EDMA4Flag) = size;
+		/* Enable automatic stop */
+		EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) |= DMA_CSR_DREQ(1U);
+		sw_id = EDMA_TCD_DLAST_SGA(tcd, kEDMA_EDMA4Flag);
+
+		/* Block the peripheral's hardware request trigger to prevent
+		 * starting the DMA before the TCDs are updated. Keep the code
+		 * between EDMA_DisableChannelRequest() and
+		 * EDMA_EnableChannelRequest() to a minimum.
+		 */
+		EDMA_DisableChannelRequest(DEV_BASE(dev), channel);
+
+		/* Wait for the DMA to become inactive before updating the TCDs.
+		 * The CSR[ACTIVE] bit deasserts quickly after the EDMA's
+		 * minor loop burst completes.
+		 */
+		while (EDMA_HW_TCD_CSR(dev, channel) & EDMA_HW_TCD_CH_ACTIVE_MASK) {
+			;
+		}
+
+		/* Identify the currently active TCD. Use DLAST_SGA as the HW ID. */
+		hw_id = EDMA_GetNextTCDAddress(DEV_EDMA_HANDLE(dev, channel));
+		if (data->transfer_settings.empty_tcds >= CONFIG_DMA_TCD_QUEUE_SIZE ||
+		    hw_id == sw_id) {
+			/* All transfers have completed. The DMA stopped
+			 * automatically and an invalid TCD has been loaded
+			 * into the HW, so update the HW TCD directly.
+			 */
+			dma_mcux_edma_update_hw_tcd(dev, channel, src, dst, size);
+			LOG_DBG("Transfer done, auto stop");
+
+		} else {
+			/* The previous TCD can automatically start this TCD.
+			 * Enable the peripheral DMA request in the previous TCD.
+			 */
+			EDMA_TCD_CSR(pre_tcd, kEDMA_EDMA4Flag) &= ~DMA_CSR_DREQ(1U);
+
+			if (data->transfer_settings.empty_tcds == CONFIG_DMA_TCD_QUEUE_SIZE - 1 ||
+			    hw_id == (uint32_t)tcd) {
+				/* The DMA is running the last transfer and the HW
+				 * has already loaded it, so we need to ensure its
+				 * DREQ is cleared.
+				 */
+				EDMA_EnableAutoStopRequest(DEV_BASE(dev), channel, false);
+				LOG_DBG("Last transfer.");
+			}
+			LOG_DBG("Manual stop");
+		}
+
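The reload path only patches descriptors while the channel is quiescent: it gates the peripheral request, spins until CSR[ACTIVE] clears at the end of the in-flight minor-loop burst, rewrites the TCDs, and only then re-enables the request. A compilable ordering sketch, with a fake status register and callbacks standing in for the real driver calls (bit position and names are placeholders):

```c
#include <stdint.h>

#define CH_ACTIVE (1u << 6) /* placeholder for EDMA_HW_TCD_CH_ACTIVE_MASK */

static volatile uint32_t fake_csr; /* stands in for the channel CSR */

static void safe_tcd_update(void (*disable_req)(void), void (*patch)(void),
			    void (*enable_req)(void))
{
	disable_req(); /* EDMA_DisableChannelRequest(): block new triggers */

	/* ACTIVE deasserts once the current minor-loop burst finishes. */
	while (fake_csr & CH_ACTIVE) {
		;
	}

	patch();      /* rewrite SW/HW TCDs while the channel is idle */
	enable_req(); /* EDMA_EnableChannelRequest(): resume transfers */
}
```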
+#ifdef CONFIG_DMA_MCUX_EDMA
+		/* It seems that there is a HW issue which may cause the ESG bit
+		 * to be cleared. As a workaround, clear the DONE bit before
+		 * setting the ESG bit.
+		 */
+		EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, kEDMA_DoneFlag);
+		EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_ESG_MASK;
+#elif (CONFIG_DMA_MCUX_EDMA_V3 || CONFIG_DMA_MCUX_EDMA_V4)
+		/* We have not verified whether this issue exists on V3/V4 HW;
+		 * this is just a placeholder.
+		 */
+#endif
+		/* The TCDs are configured. Resume the DMA. */
+		EDMA_EnableChannelRequest(DEV_BASE(dev), channel);
+
+		/* Update the write index and the number of available TCDs. */
+		data->transfer_settings.write_idx =
+			(data->transfer_settings.write_idx + 1) % CONFIG_DMA_TCD_QUEUE_SIZE;
+		data->transfer_settings.empty_tcds--;
+
+		LOG_DBG("w_idx:%d no:%d(ch:%d)", data->transfer_settings.write_idx,
+			data->transfer_settings.empty_tcds, channel);
+
+	} else {
+		/* Dynamic scatter-gather mode:
+		 * If the tcdPool is not in use (no s/g) then only a single TCD
+		 * can be active at once.
+		 */
+		if (data->busy && data->edma_handle.tcdPool == NULL) {
+			LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
+			ret = -EBUSY;
+			goto cleanup;
+		}
+
+		EDMA_PrepareTransfer(&(data->transferConfig), (void *)src,
+				     data->transfer_settings.source_data_size, (void *)dst,
+				     data->transfer_settings.dest_data_size,
+				     data->transfer_settings.source_burst_length, size,
+				     data->transfer_settings.transfer_type);
+
+		const status_t submit_status =
+			EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));
+
+		if (submit_status != kStatus_Success) {
+			LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
+			ret = -EFAULT;
+		}
 	}
 
 cleanup:
@@ -544,8 +748,12 @@ static int dma_mcux_edma_get_status(const struct device *dev, uint32_t channel,
 
 	if (DEV_CHANNEL_DATA(dev, channel)->busy) {
 		status->busy = true;
+		/* pending_length is in bytes. Multiply the remaining major loop
+		 * count by NBYTES for each minor loop.
+		 */
 		status->pending_length =
-			EDMA_GetRemainingMajorLoopCount(DEV_BASE(dev), hw_channel);
+			EDMA_GetRemainingMajorLoopCount(DEV_BASE(dev), hw_channel) *
+			DEV_CHANNEL_DATA(dev, channel)->transfer_settings.source_data_size;
 	} else {
 		status->busy = false;
 		status->pending_length = 0;
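With this change `dma_get_status()` reports `pending_length` in bytes rather than in remaining major-loop iterations. A worked example of the arithmetic (values are illustrative):

```c
#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t remaining_major_loops = 48; /* EDMA_GetRemainingMajorLoopCount() result */
	size_t source_data_size = 2;       /* bytes moved per minor-loop element */

	/* 48 major loops x 2 bytes each = 96 bytes still pending */
	size_t pending_length = remaining_major_loops * source_data_size;

	assert(pending_length == 96);
	return 0;
}
```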
diff --git a/drivers/i2s/i2s_mcux_sai.c b/drivers/i2s/i2s_mcux_sai.c
index 5ec1ff739c16f..27439d6f46b09 100644
--- a/drivers/i2s/i2s_mcux_sai.c
+++ b/drivers/i2s/i2s_mcux_sai.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021,2023 NXP Semiconductor INC.
+ * Copyright 2021,2023-2024 NXP Semiconductor INC.
  * All rights reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
@@ -299,7 +299,6 @@ static void i2s_dma_tx_callback(const struct device *dma_dev, void *arg, uint32_t
 			strm->state = I2S_STATE_ERROR;
 			goto disabled_exit_no_drop;
 		}
-		dma_start(dev_data->dev_dma, strm->dma_channel);
 
 		if (blocks_queued || (strm->free_tx_dma_blocks < MAX_TX_DMA_BLOCKS)) {
 			goto enabled_exit;
@@ -389,7 +388,6 @@ static void i2s_dma_rx_callback(const struct device *dma_dev, void *arg, uint32_t
 				ret);
 		}
 
-		dma_start(dev_data->dev_dma, strm->dma_channel);
 		}
 	} else {
 		i2s_rx_stream_disable(dev, true, false);
@@ -1213,6 +1211,7 @@ static const struct i2s_driver_api i2s_mcux_driver_api = {
 			.channel_direction = MEMORY_TO_PERIPHERAL,		\
 			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(i2s_id, tx,	\
 							      source),		\
+			.cyclic = 1,						\
 		},								\
 	},									\
 	.rx =									\
@@ -1230,6 +1229,7 @@ static const struct i2s_driver_api i2s_mcux_driver_api = {
 			.channel_direction = PERIPHERAL_TO_MEMORY,		\
 			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(i2s_id, rx,	\
 							      source),		\
+			.cyclic = 1,						\
 		},								\
 	},									\
 };										\
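The I2S and UART changes both boil down to the same client-side pattern: mark the channel cyclic, chain the blocks, and stop restarting the channel from the callback, because the looped TCD chain keeps it running. A hedged sketch of that configuration using Zephyr's `<zephyr/drivers/dma.h>` (device, channel, peripheral address, and buffer sizes are placeholders):

```c
#include <zephyr/drivers/dma.h>

static uint32_t buf0[64], buf1[64];

static int start_cyclic_rx(const struct device *dma_dev, uint32_t channel,
			   uint32_t periph_addr)
{
	static struct dma_block_config blk1 = {
		.block_size = sizeof(buf1),
		.dest_scatter_en = true,
	};
	static struct dma_block_config blk0 = {
		.block_size = sizeof(buf0),
		.dest_scatter_en = true,
		.next_block = &blk1,
	};
	struct dma_config cfg = {
		.channel_direction = PERIPHERAL_TO_MEMORY,
		.source_data_size = 4,
		.dest_data_size = 4,
		.block_count = 2,
		.head_block = &blk0,
		.cyclic = 1, /* selects the eDMA loop-SG path added above */
	};

	blk0.source_address = periph_addr;
	blk0.dest_address = (uint32_t)buf0;
	blk1.source_address = periph_addr;
	blk1.dest_address = (uint32_t)buf1;

	int ret = dma_config(dma_dev, channel, &cfg);

	return ret ? ret : dma_start(dma_dev, channel);
}
```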
diff --git a/drivers/serial/uart_mcux_lpuart.c b/drivers/serial/uart_mcux_lpuart.c
index a4c775c5b4e49..3b976d480814a 100644
--- a/drivers/serial/uart_mcux_lpuart.c
+++ b/drivers/serial/uart_mcux_lpuart.c
@@ -585,7 +585,7 @@ static void prepare_rx_dma_block_config(const struct device *dev)
 	head_block_config->dest_address = (uint32_t)rx_dma_params->buf;
 	head_block_config->source_address = LPUART_GetDataRegisterAddress(lpuart);
 	head_block_config->block_size = rx_dma_params->buf_len;
-	head_block_config->dest_scatter_en = false;
+	head_block_config->dest_scatter_en = true;
 }
 
 static int configure_and_start_rx_dma(
@@ -616,37 +616,20 @@ static int uart_mcux_lpuart_dma_replace_rx_buffer(const struct device *dev)
 	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
 	const struct mcux_lpuart_config *config = dev->config;
 	LPUART_Type *lpuart = config->base;
-	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
 
 	LOG_DBG("Replacing RX buffer, new length: %d", data->async.next_rx_buffer_len);
-
 	/* There must be a buffer to replace this one with */
 	assert(data->async.next_rx_buffer != NULL);
 	assert(data->async.next_rx_buffer_len != 0U);
 
-	rx_dma_params->buf = data->async.next_rx_buffer;
-	rx_dma_params->buf_len = data->async.next_rx_buffer_len;
-	rx_dma_params->offset = 0;
-	rx_dma_params->counter = 0;
-	data->async.next_rx_buffer = NULL;
-	data->async.next_rx_buffer_len = 0U;
-
 	const int success =
 		dma_reload(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel,
-			   LPUART_GetDataRegisterAddress(lpuart), (uint32_t)rx_dma_params->buf,
-			   rx_dma_params->buf_len);
+			   LPUART_GetDataRegisterAddress(lpuart),
+			   (uint32_t)data->async.next_rx_buffer, data->async.next_rx_buffer_len);
 
 	if (success != 0) {
 		LOG_ERR("Error %d reloading DMA with next RX buffer", success);
 	}
 
-	/* Request next buffer */
-	async_evt_rx_buf_request(dev);
-
-	int ret = dma_start(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel);
-
-	if (ret < 0) {
-		LOG_ERR("Failed to start DMA(Rx) Ch %d(%d)", config->rx_dma_config.dma_channel,
-			ret);
-	}
 	return success;
 }
 
@@ -692,9 +675,16 @@ static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t
 	async_evt_rx_rdy(dev);
 	async_evt_rx_buf_release(dev);
 
-	if (data->async.next_rx_buffer != NULL && data->async.next_rx_buffer_len > 0) {
+	/* Remember the buffer so it can be released after it is done. */
+	rx_dma_params->buf = data->async.next_rx_buffer;
+	rx_dma_params->buf_len = data->async.next_rx_buffer_len;
+	data->async.next_rx_buffer = NULL;
+	data->async.next_rx_buffer_len = 0U;
+
+	/* A new buffer was available (and already loaded into the DMA engine) */
+	if (rx_dma_params->buf != NULL && rx_dma_params->buf_len > 0) {
 		/* Request the next buffer */
-		uart_mcux_lpuart_dma_replace_rx_buffer(dev);
+		async_evt_rx_buf_request(dev);
 	} else {
 		/* Buffer full without valid next buffer, disable RX DMA */
 		LOG_INF("Disabled RX DMA, no valid next buffer ");
@@ -873,6 +863,7 @@ static int mcux_lpuart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t
 	assert(data->async.next_rx_buffer_len == 0);
 	data->async.next_rx_buffer = buf;
 	data->async.next_rx_buffer_len = len;
+	uart_mcux_lpuart_dma_replace_rx_buffer(dev);
 	irq_unlock(key);
 	return 0;
 }
@@ -1347,7 +1338,8 @@ static const struct uart_driver_api mcux_lpuart_driver_api = {
 				.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
 					id, rx, source),			\
 				.dma_callback = dma_callback,			\
-				.user_data = (void *)DEVICE_DT_INST_GET(id)	\
+				.user_data = (void *)DEVICE_DT_INST_GET(id),	\
+				.cyclic = 1,					\
 			},							\
 		},
 #else
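After this rework, `uart_rx_buf_rsp()` pushes the next buffer into the DMA engine immediately via `dma_reload()`, and the DMA callback only rotates the bookkeeping and requests another buffer, so the application should answer `UART_RX_BUF_REQUEST` promptly to keep the cyclic chain fed. A minimal application-side sketch of that contract, using Zephyr's async UART API (buffer count and sizes are placeholders):

```c
#include <zephyr/drivers/uart.h>

static uint8_t bufs[2][256];
static int next;

/* Ping-pong between two buffers: hand the driver a fresh one each time
 * it asks, so the reloaded DMA never runs out of destinations.
 */
static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	if (evt->type == UART_RX_BUF_REQUEST) {
		uart_rx_buf_rsp(dev, bufs[next], sizeof(bufs[next]));
		next ^= 1;
	}
}
```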