From 09dec22f384b1d8059336146f4cdbff529d0861e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Mon, 30 Jun 2025 08:03:06 +0200 Subject: [PATCH 01/13] dts: bindings: serial: nrf-uarte: Add timer property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Timer property indicates which TIMER instance should be used for byte counting. If timer property is present then given instance is using TIMER to count received bytes. Signed-off-by: Krzysztof Chruściński --- dts/bindings/serial/nordic,nrf-uarte.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/dts/bindings/serial/nordic,nrf-uarte.yaml b/dts/bindings/serial/nordic,nrf-uarte.yaml index 4e9ccb0dd4006..c8eb2bbc758db 100644 --- a/dts/bindings/serial/nordic,nrf-uarte.yaml +++ b/dts/bindings/serial/nordic,nrf-uarte.yaml @@ -24,3 +24,11 @@ properties: type: boolean description: | UARTE allows usage of cross domain pins with constant latency mode required. + + timer: + type: phandle + description: | + Timer instance used to count received bytes. Due to issues with frame timeout + feature it is required to reliably receive data in cases where flow control + is not used and new byte can appear on the line when frame timeout expires + but before it is handled. From 85d9863860abc0f46ec005a92c473e64381bb7e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Tue, 15 Jul 2025 10:13:10 +0200 Subject: [PATCH 02/13] drivers: serial: nrfx_uarte: Prepare code for extension MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rearrange code to prepare for upcoming extension that adds special receive mode. Signed-off-by: Krzysztof Chruściński --- drivers/serial/uart_nrfx_uarte.c | 133 +++++++++++++++++-------------- 1 file changed, 72 insertions(+), 61 deletions(-) diff --git a/drivers/serial/uart_nrfx_uarte.c b/drivers/serial/uart_nrfx_uarte.c index c53aa20754e40..dc0b76269e793 100644 --- a/drivers/serial/uart_nrfx_uarte.c +++ b/drivers/serial/uart_nrfx_uarte.c @@ -870,6 +870,78 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) static void rx_timeout(struct k_timer *timer); static void tx_timeout(struct k_timer *timer); +static void user_callback(const struct device *dev, struct uart_event *evt) +{ + struct uarte_nrfx_data *data = dev->data; + + if (data->async->user_callback) { + data->async->user_callback(dev, evt, data->async->user_data); + } +} + +static void rx_buf_release(const struct device *dev, uint8_t *buf) +{ + struct uart_event evt = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = buf, + }; + + user_callback(dev, &evt); +} + +static void notify_rx_disable(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uart_event evt = { + .type = UART_RX_DISABLED, + }; + + if (LOW_POWER_ENABLED(cfg)) { + uint32_t key = irq_lock(); + + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX); + irq_unlock(key); + } + + user_callback(dev, (struct uart_event *)&evt); + + /* runtime PM is put after the callback. In case uart is re-enabled from that + * callback we avoid suspending/resuming the device. 
+ */ + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + pm_device_runtime_put_async(dev, K_NO_WAIT); + } +} + +static int uarte_nrfx_rx_disable(const struct device *dev) +{ + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + int key; + + if (async_rx->buf == NULL) { + return -EFAULT; + } + + k_timer_stop(&async_rx->timer); + + key = irq_lock(); + + if (async_rx->next_buf != NULL) { + nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + } + + async_rx->enabled = false; + async_rx->discard_fifo = true; + + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); + irq_unlock(key); + + return 0; +} + #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } @@ -1068,15 +1140,6 @@ static int uarte_nrfx_tx_abort(const struct device *dev) return 0; } -static void user_callback(const struct device *dev, struct uart_event *evt) -{ - struct uarte_nrfx_data *data = dev->data; - - if (data->async->user_callback) { - data->async->user_callback(dev, evt, data->async->user_data); - } -} - static void notify_uart_rx_rdy(const struct device *dev, size_t len) { struct uarte_nrfx_data *data = dev->data; @@ -1090,29 +1153,6 @@ static void notify_uart_rx_rdy(const struct device *dev, size_t len) user_callback(dev, &evt); } -static void rx_buf_release(const struct device *dev, uint8_t *buf) -{ - struct uart_event evt = { - .type = UART_RX_BUF_RELEASED, - .data.rx_buf.buf = buf, - }; - - user_callback(dev, &evt); -} - -static void notify_rx_disable(const struct device *dev) -{ - struct uart_event evt = { - .type = UART_RX_DISABLED, - }; - - user_callback(dev, (struct uart_event *)&evt); - - if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { - pm_device_runtime_put_async(dev, K_NO_WAIT); - } -} - #ifdef UARTE_HAS_FRAME_TIMEOUT static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout) { @@ -1339,35 +1379,6 @@ static int uarte_nrfx_callback_set(const struct device *dev, return 0; } -static int uarte_nrfx_rx_disable(const struct device *dev) -{ - struct uarte_nrfx_data *data = dev->data; - struct uarte_async_rx *async_rx = &data->async->rx; - NRF_UARTE_Type *uarte = get_uarte_instance(dev); - int key; - - if (async_rx->buf == NULL) { - return -EFAULT; - } - - k_timer_stop(&async_rx->timer); - - key = irq_lock(); - - if (async_rx->next_buf != NULL) { - nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); - } - - async_rx->enabled = false; - async_rx->discard_fifo = true; - - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); - irq_unlock(key); - - return 0; -} - static void tx_timeout(struct k_timer *timer) { const struct device *dev = k_timer_user_data_get(timer); From 1487acd3f8aee911821bafc9059f0e310860bfc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Mon, 30 Jun 2025 07:48:03 +0200 Subject: [PATCH 03/13] drivers: serial: nrfx_uarte: Add mode with TIMER byte counting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add mode to be used on UARTE with frame timeout which is using a bounce buffers and TIMER to count bytes. 
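The mode is used only on instances that have the timer devicetree property added in the first patch of this series. As a minimal illustration only, with placeholder node labels (uart130, timer131) that are not part of this change, an overlay assigning a TIMER instance to a UARTE instance could look like:
&uart130 { timer = <&timer131>; };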
This mode shall be used to reliably receive data without HWFC, as the frame timeout approach is not 100% reliable: it can lose or corrupt a byte when a new byte arrives after the frame timeout is detected but before it is fully handled. This mode is similar to the one enabled with CONFIG_UART_x_NRF_HW_ASYNC, but additional bounce buffers are used: UARTE receives data into internal buffers and the driver copies the data to the user buffer. The legacy approach cannot be used because in new SoCs the DMA copies data in words, so a received byte stays in the internal DMA buffer until 4 bytes are received or the end of the transfer happens, and only then is the internal DMA buffer flushed. Signed-off-by: Krzysztof Chruściński --- drivers/serial/Kconfig.nrfx | 26 + drivers/serial/Kconfig.nrfx_uart_instance | 7 + drivers/serial/uart_nrfx_uarte.c | 1027 +++++++++++++++++++-- 3 files changed, 972 insertions(+), 88 deletions(-) diff --git a/drivers/serial/Kconfig.nrfx b/drivers/serial/Kconfig.nrfx index 50c81a4c60747..03e06bd721085 100644 --- a/drivers/serial/Kconfig.nrfx +++ b/drivers/serial/Kconfig.nrfx @@ -79,6 +79,32 @@ config UART_ASYNC_TX_CACHE_SIZE in RAM, because EasyDMA in UARTE peripherals can only transfer data from RAM. +config UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + bool "Use TIMER to count RX bytes" + depends on UART_ASYNC_API + depends on UART_NRFX_UARTE_LEGACY_SHIM + depends on !ARCH_POSIX # Mode not supported on BSIM target + select NRFX_GPPI + +config UART_NRFX_UARTE_BOUNCE_BUF_LEN + int "RX bounce buffer size" + depends on UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + default 256 + range 64 1024 + help + Length of the bounce buffer that is used when the workaround with bounce buffers is applied. + +config UART_NRFX_UARTE_BOUNCE_BUF_SWAP_LATENCY + int "RX bounce buffer swap latency (in microseconds)" + depends on UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + default 300 + help + Option decides how long before the current bounce buffer is filled the driver + attempts to swap the buffer. It must be long enough to ensure that the + space following the buffer is not overwritten. Too high a value results + in more frequent buffer swaps, which impacts performance. The setting should + take into account potential interrupt handling latency. + config UART_NRFX_UARTE_DIRECT_ISR bool "Use direct ISR" default y if !MULTITHREADING diff --git a/drivers/serial/Kconfig.nrfx_uart_instance b/drivers/serial/Kconfig.nrfx_uart_instance index b1a68d691c45b..82ffaa10fcb9b 100644 --- a/drivers/serial/Kconfig.nrfx_uart_instance +++ b/drivers/serial/Kconfig.nrfx_uart_instance @@ -18,6 +18,13 @@ config UART_$(nrfx_uart_num)_ASYNC help This option enables UART Asynchronous API support on port $(nrfx_uart_num). +config UART_$(nrfx_uart_num)_COUNT_BYTES_WITH_TIMER + bool + depends on $(dt_nodelabel_has_prop,uart$(nrfx_uart_num),timer) + depends on HAS_HW_NRF_UARTE$(nrfx_uart_num) + default y + imply UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + config UART_$(nrfx_uart_num)_ENHANCED_POLL_OUT bool "Efficient poll out on port $(nrfx_uart_num)" depends on !$(dt_nodelabel_bool_prop,uart$(nrfx_uart_num),endtx-stoptx-supported) diff --git a/drivers/serial/uart_nrfx_uarte.c b/drivers/serial/uart_nrfx_uarte.c index dc0b76269e793..90247dfecc5c6 100644 --- a/drivers/serial/uart_nrfx_uarte.c +++ b/drivers/serial/uart_nrfx_uarte.c @@ -188,6 +188,16 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); /* Size of hardware fifo in RX path. */ #define UARTE_HW_RX_FIFO_SIZE 5 +/* TIMER CC channels for counting bytes with TIMER. */ +/* Channel used for capturing current counter value.
*/ +#define UARTE_TIMER_CAPTURE_CH 0 +/* Channel used to get compare event when number of received bytes reaches user buffer size. */ +#define UARTE_TIMER_USR_CNT_CH 1 +/* Channel used to get compare event when bounce buffer need to be switched. */ +#define UARTE_TIMER_BUF_SWITCH_CH 2 +/* Magic byte that is used to fill the buffer. */ +#define UARTE_MAGIC_BYTE 0xAA + #ifdef UARTE_ANY_ASYNC struct uarte_async_tx { @@ -201,6 +211,30 @@ struct uarte_async_tx { bool pending; }; +/* Structure with data for Count Bytes With Timer receiver mode (cbwt). */ +struct uarte_async_rx_cbwt { + uint8_t *curr_bounce_buf; + uint8_t *anomaly_byte_addr; + uint32_t usr_rd_off; + uint32_t usr_wr_off; + uint32_t bounce_off; + uint32_t bounce_limit; + uint32_t last_cnt; + uint32_t cc_usr; + uint32_t cc_swap; +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + size_t bounce_buf_swap_len; +#endif +#ifdef UARTE_ANY_CACHE + uint8_t *anomaly_byte_dst; + uint8_t anomaly_byte; +#endif + uint8_t bounce_idx; + uint8_t ppi_ch; + bool in_irq; + bool discard_fifo; +}; + struct uarte_async_rx { struct k_timer timer; #ifdef CONFIG_HAS_NORDIC_DMM @@ -212,8 +246,9 @@ struct uarte_async_rx { size_t offset; uint8_t *next_buf; size_t next_buf_len; -#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX -#if !defined(UARTE_HAS_FRAME_TIMEOUT) +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \ + defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) +#if !defined(UARTE_HAS_FRAME_TIMEOUT) || defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) uint32_t idle_cnt; #endif k_timeout_t timeout; @@ -286,6 +321,10 @@ struct uarte_nrfx_data { #define UARTE_FLAG_POLL_OUT BIT(3) /* Flag indicating that a workaround for not working frame timeout is active. */ #define UARTE_FLAG_FTIMEOUT_WATCH BIT(4) +/* Flag indicating that UART_RX_BUF_REQUEST event need to be called from the interrupt context. */ +#define UARTE_FLAG_RX_BUF_REQ BIT(5) +/* Flag indicating that CC value in TIMER was set too late. */ +#define UARTE_FLAG_LATE_CC BIT(6) /* If enabled then ENDTX is PPI'ed to TXSTOP */ #define UARTE_CFG_FLAG_PPI_ENDTX BIT(0) @@ -304,6 +343,12 @@ struct uarte_nrfx_data { /* Indicates that workaround for spurious RXTO during restart shall be applied. */ #define UARTE_CFG_FLAG_SPURIOUS_RXTO BIT(3) +/* Indicates that UARTE/TIMER interrupt priority differs from system clock (GRTC/RTC). */ +#define UARTE_CFG_FLAG_VAR_IRQ BIT(4) + +/* Indicates that instance needs special handling of BAUDRATE register. */ +#define UARTE_CFG_FLAG_VOLATILE_BAUDRATE BIT(5) + /* Formula for getting the baudrate settings is following: * 2^12 * (2^20 / (f_PCLK / desired_baudrate)) where f_PCLK is a frequency that * drives the UARTE. @@ -340,6 +385,14 @@ struct uarte_nrfx_data { (baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 : \ (baudrate) == 1000000 ? 
NRF_UARTE_BAUDRATE_1000000 : 0) +#define UARTE_MIN_BUF_SWAP_LEN 10 + +#define UARTE_US_TO_BYTES(baudrate) \ + DIV_ROUND_UP((CONFIG_UART_NRFX_UARTE_BOUNCE_BUF_SWAP_LATENCY * baudrate), 10000000) + +#define UARTE_BUF_SWAP_LEN(bounce_buf_len, baudrate) \ + ((bounce_buf_len) - MAX(UARTE_MIN_BUF_SWAP_LEN, UARTE_US_TO_BYTES(baudrate))) + #define LOW_POWER_ENABLED(_config) \ (IS_ENABLED(UARTE_ANY_LOW_POWER) && \ !IS_ENABLED(CONFIG_PM_DEVICE) && \ @@ -386,6 +439,15 @@ struct uarte_nrfx_config { #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef UARTE_ANY_ASYNC +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + NRF_TIMER_Type * timer_regs; + IRQn_Type timer_irqn; + IRQn_Type uarte_irqn; + uint8_t *bounce_buf[2]; + size_t bounce_buf_len; + size_t bounce_buf_swap_len; + struct uarte_async_rx_cbwt *cbwt_data; +#endif nrfx_timer_t timer; uint8_t *tx_cache; uint8_t *rx_flush_buf; @@ -405,6 +467,12 @@ struct uarte_nrfx_config { (IS_ENABLED(UARTE_ANY_HW_ASYNC) ? \ (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false) +/* Determine if instance is using an approach with counting bytes with TIMER (cbwt). */ +#define IS_CBWT(dev) \ + COND_CODE_1(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER, \ + ((((const struct uarte_nrfx_config *)dev->config)->cbwt_data != NULL)), \ + (false)) + static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; @@ -596,10 +664,23 @@ static int baudrate_set(const struct device *dev, uint32_t baudrate) return -EINVAL; } +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + struct uarte_async_rx_cbwt *cbwt_data = config->cbwt_data; + + cbwt_data->bounce_buf_swap_len = UARTE_BUF_SWAP_LEN(config->bounce_buf_len, + baudrate); + } +#endif + #ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND - struct uarte_nrfx_data *data = dev->data; + if (config->flags & UARTE_CFG_FLAG_VOLATILE_BAUDRATE) { + struct uarte_nrfx_data *data = dev->data; - data->nrf_baudrate = nrf_baudrate; + data->nrf_baudrate = nrf_baudrate; + } else { + nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate); + } #else nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate); #endif @@ -787,9 +868,11 @@ static void uarte_periph_enable(const struct device *dev) #endif #if UARTE_BAUDRATE_RETENTION_WORKAROUND - nrf_uarte_baudrate_set(uarte, - COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, - (data->nrf_baudrate), (config->nrf_baudrate))); + if (config->flags & UARTE_CFG_FLAG_VOLATILE_BAUDRATE) { + nrf_uarte_baudrate_set(uarte, + COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, + (data->nrf_baudrate), (config->nrf_baudrate))); + } #endif #ifdef UARTE_ANY_ASYNC @@ -889,13 +972,17 @@ static void rx_buf_release(const struct device *dev, uint8_t *buf) user_callback(dev, &evt); } -static void notify_rx_disable(const struct device *dev) +static void rx_disable_finalize(const struct device *dev) { const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; struct uart_event evt = { .type = UART_RX_DISABLED, }; + async_rx->enabled = false; + if (LOW_POWER_ENABLED(cfg)) { uint32_t key = irq_lock(); @@ -913,28 +1000,42 @@ static void notify_rx_disable(const struct device *dev) } } -static int uarte_nrfx_rx_disable(const struct device *dev) +static int rx_disable(const struct device *dev, bool api) { struct uarte_nrfx_data *data = dev->data; struct uarte_async_rx *async_rx = &data->async->rx; NRF_UARTE_Type *uarte = get_uarte_instance(dev); 
int key; - if (async_rx->buf == NULL) { - return -EFAULT; - } - k_timer_stop(&async_rx->timer); key = irq_lock(); +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + if (cbwt_data) { + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_BUF_SWITCH_CH)); + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_USR_CNT_CH)); + nrf_timer_int_disable(cfg->timer_regs, + nrf_timer_compare_int_get(UARTE_TIMER_BUF_SWITCH_CH) | + nrf_timer_compare_int_get(UARTE_TIMER_USR_CNT_CH)); + nrf_uarte_shorts_disable(cfg->uarte_regs, NRF_UARTE_SHORT_ENDRX_STARTRX); + } +#endif + if (async_rx->next_buf != NULL) { nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); } async_rx->enabled = false; - async_rx->discard_fifo = true; + if (api) { + async_rx->discard_fifo = true; + } nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); irq_unlock(key); @@ -942,6 +1043,18 @@ static int uarte_nrfx_rx_disable(const struct device *dev) return 0; } +static int uarte_nrfx_rx_disable(const struct device *dev) +{ + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + + if (async_rx->buf == NULL) { + return -EFAULT; + } + + return rx_disable(dev, true); +} + #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } @@ -966,10 +1079,10 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) if (ret != NRFX_SUCCESS) { LOG_ERR("Timer already initialized"); return -EINVAL; - } else { - nrfx_timer_clear(&cfg->timer); } + nrfx_timer_clear(&cfg->timer); + ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); @@ -987,6 +1100,653 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) } #endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */ +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + +static uint32_t get_byte_cnt(NRF_TIMER_Type *timer) +{ + nrf_timer_task_trigger(timer, nrf_timer_capture_task_get(UARTE_TIMER_CAPTURE_CH)); + + nrf_barrier_w(); + + return nrf_timer_cc_get(timer, UARTE_TIMER_CAPTURE_CH); +} + +static void rx_buf_req(const struct device *dev) +{ + struct uart_event evt = { + .type = UART_RX_BUF_REQUEST, + }; + + user_callback(dev, &evt); +} + +static bool notify_rx_rdy(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + size_t len = cbwt_data->usr_wr_off - cbwt_data->usr_rd_off; + + if (len == 0) { + return async_rx->buf != NULL; + } + + struct uart_event evt = { + .type = UART_RX_RDY, + .data.rx.buf = async_rx->buf, + .data.rx.len = len, + .data.rx.offset = cbwt_data->usr_rd_off + }; + user_callback(dev, &evt); + cbwt_data->usr_rd_off += len; + + if (cbwt_data->usr_rd_off == async_rx->buf_len) { + rx_buf_release(dev, async_rx->buf); + async_rx->buf = async_rx->next_buf; + async_rx->buf_len = async_rx->next_buf_len; + async_rx->next_buf_len = 0; + async_rx->next_buf = 0; + cbwt_data->usr_rd_off = 0; + cbwt_data->usr_wr_off = 0; + + if (async_rx->buf_len == 0) { + return false; + } + + /* Set past value to ensure that event will not expire after clearing but + * before setting the 
new value. + */ + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH, cbwt_data->cc_usr - 1); + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_USR_CNT_CH)); + cbwt_data->cc_usr += async_rx->buf_len; + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH, cbwt_data->cc_usr); + + /* Check if CC is already in the past. In that case trigger CC handling.*/ + if (cbwt_data->cc_usr <= get_byte_cnt(cfg->timer_regs)) { + atomic_or(&data->flags, UARTE_FLAG_LATE_CC); + NRFX_IRQ_PENDING_SET(cfg->timer_irqn); + } else { + atomic_and(&data->flags, ~UARTE_FLAG_LATE_CC); + } + } + + return true; +} + +static void anomaly_byte_handle(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint8_t curr_byte, anomaly_byte; + uint32_t diff; + + if (cbwt_data->anomaly_byte_addr == NULL) { + return; + } + + diff = cfg->uarte_regs->DMA.RX.PTR - (uint32_t)cbwt_data->curr_bounce_buf; + /* Anomaly can be checked only if more than 1 byte is received to the current buffer. */ + if (diff < 2) { + return; + } + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(cbwt_data->curr_bounce_buf, 1); + sys_cache_data_invd_range(cbwt_data->anomaly_byte_addr, 1); + } + + curr_byte = cbwt_data->curr_bounce_buf[0]; + anomaly_byte = *cbwt_data->anomaly_byte_addr; + if ((curr_byte == UARTE_MAGIC_BYTE) && (anomaly_byte != UARTE_MAGIC_BYTE)) { +#ifdef UARTE_ANY_CACHE + if (cfg->flags & UARTE_CFG_FLAG_CACHEABLE) { + /* We cannot write directly to curr_bounce_buf as it is written by + * DMA and with cache operations data may be overwritten. Copying + * need to be postponed to the moment when user buffer is filled. + */ + cbwt_data->anomaly_byte = anomaly_byte; + cbwt_data->anomaly_byte_dst = &cbwt_data->curr_bounce_buf[0]; + } else { + cbwt_data->curr_bounce_buf[0] = anomaly_byte; + } +#else + cbwt_data->curr_bounce_buf[0] = anomaly_byte; +#endif + } + + cbwt_data->anomaly_byte_addr = NULL; +} + +static uint32_t fill_usr_buf(const struct device *dev, uint32_t len) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + uint8_t *buf = cfg->bounce_buf[cbwt_data->bounce_idx]; + uint32_t usr_rem = async_rx->buf_len - cbwt_data->usr_wr_off; + uint32_t bounce_rem = cbwt_data->bounce_limit - cbwt_data->bounce_off; + uint32_t cpy_len = MIN(bounce_rem, MIN(usr_rem, len)); + + __ASSERT(cpy_len + cbwt_data->bounce_off <= cfg->bounce_buf_len, + "Exceeding the buffer cpy_len:%d off:%d limit:%d", + cpy_len, cbwt_data->bounce_off, cbwt_data->bounce_limit); + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(&buf[cbwt_data->bounce_off], cpy_len); + } + + memcpy(&async_rx->buf[cbwt_data->usr_wr_off], &buf[cbwt_data->bounce_off], cpy_len); +#ifdef UARTE_ANY_CACHE + if ((buf == cbwt_data->anomaly_byte_dst) && (cbwt_data->bounce_off == 0)) { + async_rx->buf[cbwt_data->usr_wr_off] = cbwt_data->anomaly_byte; + cbwt_data->anomaly_byte_dst = NULL; + } +#endif + cbwt_data->bounce_off += cpy_len; + cbwt_data->usr_wr_off += cpy_len; + cbwt_data->last_cnt += cpy_len; + if (cbwt_data->bounce_off == cbwt_data->bounce_limit) { + /* Bounce buffer drained */ + cbwt_data->bounce_idx = cbwt_data->bounce_idx == 0 ? 
1 : 0; + cbwt_data->bounce_off = 0; + cbwt_data->bounce_limit = cfg->bounce_buf_len; + } + + return cpy_len; +} + +static bool update_usr_buf(const struct device *dev, uint32_t len, bool notify_any, bool buf_req) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + anomaly_byte_handle(dev); + + do { + uint32_t cpy_len = len ? fill_usr_buf(dev, len) : 0; + bool usr_buf_full = cbwt_data->usr_wr_off == async_rx->buf_len; + + len -= cpy_len; + if (((len == 0) && notify_any) || usr_buf_full) { + if (!notify_rx_rdy(dev)) { + return false; + } + + if (usr_buf_full && buf_req) { + rx_buf_req(dev); + } + } + } while (len > 0); + + return true; +} + +static void prepare_bounce_buf(const struct device *dev, uint8_t *buf, + size_t swap_len, size_t len) +{ + const struct uarte_nrfx_config *cfg = dev->config; + + buf[0] = UARTE_MAGIC_BYTE; + for (size_t i = swap_len; i < len; i++) { + buf[i] = UARTE_MAGIC_BYTE; + } + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_flush_range(buf, 1); + sys_cache_data_flush_range(&buf[swap_len], len); + } +} + +/* This function is responsible for swapping the bounce buffer and it is the most + * tricky part of the solution. The receiver is continuously working and we want to + * change the DMA pointer on the fly. The DMA is also incrementing that pointer, so there are + * moments in the reception when updating the pointer will result in different behavior. + * + * There are two main cases that need to be handled: + * 1. PTR is updated and there was no byte boundary (in the middle of a byte or there is + * no byte on the line). It is a safe spot. + * + * This is the most common and simplest case. PTR is updated, but since + * DMA has already started the reception of the previous byte, the next byte will + * still be stored at the previous PTR and the bytes following it will be stored in the + * new bounce buffer. + * + * 2. Updating the pointer collided with a byte boundary. + * + * RXDRDY and RXSTARTED events are used to detect if a collision occurred. + * There are a few scenarios that may happen and the driver must detect which one occurred. + * Detection is done by reading back the PTR register. The following cases are considered: + * + * - PTR did not change. It means that it was written after the byte boundary. It is the same + * case as if PTR was updated in the safe spot. + * + * - PTR is updated by 1. There is an anomaly and it is unclear where the next byte will be + * copied. PTR state indicates that it should be copied to the beginning of the new + * bounce buffer but it might be copied to the previous bounce buffer. Both locations + * are written with a magic byte (0xAA) and later on it is checked which location has + * changed; if the byte was written to the previous bounce buffer it is copied to the + * start of the new bounce buffer. + * + * - PTR is not updated with the new bounce buffer location. DMA is incrementing the PTR content + * and it is possible that SW writes the new value between read and modify, so DMA may + * overwrite the value written by the driver. In that case reception continues to the + * previous bounce buffer and the swap procedure needs to be repeated.
+ */ +static int bounce_buf_swap(const struct device *dev, uint8_t *prev_bounce_buf) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t prev_buf_cnt, new_cnt, cnt, ptr; + uint32_t prev_buf_inc = 1; + int key; + + key = irq_lock(); + /* Clear events that indicates byte boundary and set PTR. If events are set + * after PTR is set then we know that setting PTR collided with byte boundary. + */ + nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXSTARTED); + nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + cfg->uarte_regs->DMA.RX.PTR = (uint32_t)cbwt_data->curr_bounce_buf; + cnt = get_byte_cnt(cfg->timer_regs); + + if (!nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY) && + !nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXSTARTED)) { + /* RXDRDY did not happen when PTR was set. Safest case. PTR was updated + * correctly. Last byte will be received to the previous buffer. + */ + new_cnt = 0; + prev_buf_cnt = cnt - cbwt_data->last_cnt; + goto no_collision; + } + + /* Setting PTR collided with byte boundary we need to detect what happened. */ + while (!nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXSTARTED)) { + } + + /* Read pointer when there is no new byte coming. */ + do { + cnt = get_byte_cnt(cfg->timer_regs); + ptr = cfg->uarte_regs->DMA.RX.PTR; + } while (cnt != get_byte_cnt(cfg->timer_regs)); + + new_cnt = ptr - (uint32_t)cbwt_data->curr_bounce_buf; + prev_buf_cnt = cnt - cbwt_data->last_cnt; + + if (new_cnt == 0) { + /* New PTR is not incremented. It was written after LIST post ENDRX + * incrementation. + */ + } else if (new_cnt == 1) { + /* new_cnt == 1. New PTR incremented. It's possible that data is already + * copied to that new location or it is written to the tail of the previous + * bounce buffer. We try to detect what happens. + */ + prev_buf_inc = 0; + cbwt_data->anomaly_byte_addr = + &prev_bounce_buf[cbwt_data->bounce_off + prev_buf_cnt]; + } else if (new_cnt <= cfg->bounce_buf_len) { + prev_buf_inc = 0; + prev_buf_cnt = cnt - cbwt_data->last_cnt - (new_cnt - 1); + } else { + /* New PTR value is not set. Re-set PTR is needed. Transfer continues to + * previous buffer whole buffer swapping need to be repeat. + */ + irq_unlock(key); + return -EAGAIN; + } + +no_collision: + cbwt_data->bounce_limit = cbwt_data->bounce_off + prev_buf_cnt + prev_buf_inc; + __ASSERT(cbwt_data->bounce_limit < cfg->bounce_buf_len, + "Too high limit (%d, max:%d), increase latency", + cbwt_data->bounce_limit, cfg->bounce_buf_len); + irq_unlock(key); + + return prev_buf_cnt; +} + +static size_t get_swap_len(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + return cbwt_data->bounce_buf_swap_len; +#else + return cfg->bounce_buf_swap_len; +#endif +} + +static void bounce_buf_switch(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + int new_data = cbwt_data->cc_swap - cbwt_data->last_cnt; + uint8_t *prev_bounce_buf = cbwt_data->curr_bounce_buf; + int prev_cnt; + + /* Fill user buffer with all pending data. */ + if (!update_usr_buf(dev, new_data < 0 ? 0 : new_data, false, true)) { + rx_disable(dev, false); + return; + } + + cbwt_data->curr_bounce_buf = (cbwt_data->curr_bounce_buf == cfg->bounce_buf[0]) ? 
+ cfg->bounce_buf[1] : cfg->bounce_buf[0]; + prepare_bounce_buf(dev, cbwt_data->curr_bounce_buf, get_swap_len(dev), + cfg->bounce_buf_len); + + /* Swapping may need retry. */ + while ((prev_cnt = bounce_buf_swap(dev, prev_bounce_buf)) < 0) { + } + + /* Update user buffer with data that was received during swapping. */ + if (update_usr_buf(dev, prev_cnt, false, true)) { + /* Set compare event for next moment when bounce buffers need to be swapped. */ + cbwt_data->cc_swap += get_swap_len(dev); + __ASSERT(cbwt_data->cc_swap > get_byte_cnt(cfg->timer_regs), + "Setting CC too late next:%d cnt:%d", + cbwt_data->cc_swap, get_byte_cnt(cfg->timer_regs)); + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_BUF_SWITCH_CH, cbwt_data->cc_swap); + } else { + /* Stop RX. */ + rx_disable(dev, false); + } +} + +static void usr_buf_complete(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t rem = async_rx->buf_len - cbwt_data->usr_wr_off; + + __ASSERT_NO_MSG(rem <= (get_byte_cnt(cfg->timer_regs) - cbwt_data->last_cnt)); + + if (!update_usr_buf(dev, rem, true, true)) { + /* Stop RX if there is no next buffer. */ + rx_disable(dev, false); + } +} + +static void notify_new_data(const struct device *dev, bool buf_req) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t cnt = get_byte_cnt(cfg->timer_regs); + uint32_t new_data = cnt - cbwt_data->last_cnt; + + (void)update_usr_buf(dev, new_data, true, buf_req); +} + +static void cbwt_rx_timeout(struct k_timer *timer) +{ + const struct device *dev = k_timer_user_data_get(timer); + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + struct uarte_async_rx *async_rx = &data->async->rx; + + if (nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY)) { + nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + async_rx->idle_cnt = 0; + } else { + async_rx->idle_cnt++; + if (async_rx->idle_cnt == (RX_TIMEOUT_DIV - 1)) { + if (cfg->flags & UARTE_CFG_FLAG_VAR_IRQ) { + if (cbwt_data->in_irq) { + /* TIMER or UARTE interrupt preempted. Lets try again + * later. 
+ */ + k_timer_start(timer, async_rx->timeout, K_NO_WAIT); + return; + } + irq_disable(cfg->uarte_irqn); + irq_disable(cfg->timer_irqn); + } + + nrf_uarte_int_enable(cfg->uarte_regs, NRF_UARTE_INT_RXDRDY_MASK); + notify_new_data(dev, true); + + if (cfg->flags & UARTE_CFG_FLAG_VAR_IRQ) { + irq_enable(cfg->uarte_irqn); + irq_enable(cfg->timer_irqn); + } + return; + } + } + + k_timer_start(timer, async_rx->timeout, K_NO_WAIT); +} + +static void cbwt_rx_flush_handle(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t rem_data = get_byte_cnt(cfg->timer_regs) - cbwt_data->last_cnt; + uint32_t bbuf_rem_data = cbwt_data->bounce_limit - cbwt_data->bounce_off; + uint32_t amount; + uint8_t *dst; + + nrf_uarte_rx_buffer_set(uarte, cfg->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX); + while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { + /* empty */ + } + + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { + /* FIFO is empty. */ + return; + } + + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + amount = nrf_uarte_rx_amount_get(uarte); + + if (rem_data <= bbuf_rem_data) { + /* instead of -1 it should be -amount but RXDRDY event is not generated + * for bytes following first that goes to FIFO they are generated during flushing. + */ + dst = &cfg->bounce_buf[cbwt_data->bounce_idx][cbwt_data->bounce_off + rem_data - 1]; + } else { + /* See comment in if clause. */ + dst = &cbwt_data->curr_bounce_buf[rem_data - bbuf_rem_data - 1]; + } + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(cfg->rx_flush_buf, amount); + sys_cache_data_invd_range(dst, amount); + } + + memcpy(dst, cfg->rx_flush_buf, amount); +} + +static void cbwt_rxto_isr(const struct device *dev, bool do_flush) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + if (async_rx->buf) { + notify_new_data(dev, false); + } + + if (async_rx->buf) { + rx_buf_release(dev, async_rx->buf); + async_rx->buf = NULL; + } + + if (async_rx->next_buf) { + rx_buf_release(dev, async_rx->next_buf); + async_rx->next_buf = NULL; + } + + if (do_flush) { + cbwt_rx_flush_handle(dev); + } + + if (async_rx->discard_fifo) { + cbwt_data->discard_fifo = async_rx->discard_fifo; + async_rx->discard_fifo = false; + } + nrf_timer_task_trigger(cfg->timer_regs, NRF_TIMER_TASK_STOP); + rx_disable_finalize(dev); +} + +static bool timer_ch_evt_check_clear(NRF_TIMER_Type *timer, uint32_t ch) +{ + nrf_timer_event_t evt = nrf_timer_compare_event_get(ch); + + if (nrf_timer_event_check(timer, evt)) { + nrf_timer_event_clear(timer, evt); + return true; + } + + return false; +} + +static void timer_isr(const void *arg) +{ + const struct device *dev = arg; + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + static const uint32_t flags_to_check = UARTE_FLAG_RX_BUF_REQ | + UARTE_FLAG_TRIG_RXTO | + UARTE_FLAG_LATE_CC; + uint32_t flags = atomic_and(&data->flags, ~flags_to_check); + + cbwt_data->in_irq = true; + + if 
(timer_ch_evt_check_clear(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH) || + (flags & UARTE_FLAG_LATE_CC)) { + usr_buf_complete(dev); + } + + /* Must be after user buf complet CC handling. */ + if (timer_ch_evt_check_clear(cfg->timer_regs, UARTE_TIMER_BUF_SWITCH_CH)) { + bounce_buf_switch(dev); + } + + if (flags & UARTE_FLAG_RX_BUF_REQ) { + rx_buf_req(dev); + } + + if (flags & UARTE_FLAG_TRIG_RXTO) { + cbwt_rxto_isr(dev, false); + } + + cbwt_data->in_irq = false; +} + +static void cbwt_rx_enable(const struct device *dev, bool with_timeout) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t rem_data; + uint32_t len = async_rx->buf_len; + uint32_t rx_int_mask = NRF_UARTE_INT_RXTO_MASK | + (with_timeout ? NRF_UARTE_INT_RXDRDY_MASK : 0); + + if (cbwt_data->discard_fifo) { + rem_data = 0; + cbwt_data->discard_fifo = false; + } else { + rem_data = get_byte_cnt(cfg->timer_regs) - cbwt_data->last_cnt; + } + + cbwt_data->usr_rd_off = 0; + cbwt_data->usr_wr_off = 0; + + if (rem_data >= len) { + atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO); + NRFX_IRQ_PENDING_SET(cfg->timer_irqn); + return; + } else if (rem_data) { + (void)update_usr_buf(dev, rem_data, false, true); + len -= rem_data; + } + + prepare_bounce_buf(dev, cfg->bounce_buf[0], get_swap_len(dev), cfg->bounce_buf_len); + + cbwt_data->last_cnt = 0; + cbwt_data->bounce_off = 0; + cbwt_data->bounce_idx = 0; + cbwt_data->curr_bounce_buf = cfg->bounce_buf[0]; + cbwt_data->bounce_limit = cfg->bounce_buf_len; + /* Enable ArrayList. */ + nrf_uarte_shorts_enable(cfg->uarte_regs, NRF_UARTE_SHORT_ENDRX_STARTRX); + nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + nrf_uarte_int_enable(cfg->uarte_regs, rx_int_mask); + nrf_uarte_rx_buffer_set(cfg->uarte_regs, cbwt_data->curr_bounce_buf, 1); + + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_BUF_SWITCH_CH)); + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_USR_CNT_CH)); + nrf_timer_int_enable(cfg->timer_regs, + nrf_timer_compare_int_get(UARTE_TIMER_BUF_SWITCH_CH) | + nrf_timer_compare_int_get(UARTE_TIMER_USR_CNT_CH)); + nrf_timer_task_trigger(cfg->timer_regs, NRF_TIMER_TASK_CLEAR); + nrf_timer_task_trigger(cfg->timer_regs, NRF_TIMER_TASK_START); + cbwt_data->cc_usr = len; + cbwt_data->cc_swap = get_swap_len(dev); + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_BUF_SWITCH_CH, get_swap_len(dev)); + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH, len); + + atomic_or(&data->flags, UARTE_FLAG_RX_BUF_REQ); + nrf_uarte_task_trigger(cfg->uarte_regs, NRF_UARTE_TASK_STARTRX); + NRFX_IRQ_PENDING_SET(cfg->timer_irqn); +} + +static int cbwt_uarte_async_init(const struct device *dev) +{ + /* As this approach does not use nrfx_timer driver but only HAL special setup + * function is used. 
+ */ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + static const uint32_t rx_int_mask = NRF_UARTE_INT_ERROR_MASK | + NRF_UARTE_INT_RXTO_MASK; + uint32_t evt = nrf_uarte_event_address_get(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + uint32_t tsk = nrf_timer_task_address_get(cfg->timer_regs, NRF_TIMER_TASK_COUNT); + nrfx_err_t ret; + + nrf_timer_mode_set(cfg->timer_regs, NRF_TIMER_MODE_COUNTER); + nrf_timer_bit_width_set(cfg->timer_regs, NRF_TIMER_BIT_WIDTH_32); + + ret = nrfx_gppi_channel_alloc(&cbwt_data->ppi_ch); + if (ret != NRFX_SUCCESS) { + return -ENOMEM; + } + + nrfx_gppi_channel_endpoints_setup(cbwt_data->ppi_ch, evt, tsk); + nrfx_gppi_channels_enable(BIT(cbwt_data->ppi_ch)); + +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + cbwt_data->bounce_buf_swap_len = cfg->bounce_buf_swap_len; +#endif + + /* Enable EasyDMA LIST feature (it is exposed in SPIM but not in UARTE). */ + *(volatile uint32_t *)((uint32_t)cfg->uarte_regs + 0x714) = 1; + nrf_uarte_int_enable(cfg->uarte_regs, rx_int_mask); + + return 0; +} +#endif /* CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER */ + static int uarte_async_init(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; @@ -999,6 +1759,17 @@ static int uarte_async_init(const struct device *dev) ((IS_ENABLED(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) && !IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) ? NRF_UARTE_INT_RXDRDY_MASK : 0); + k_timer_init(&data->async->rx.timer, rx_timeout, NULL); + k_timer_user_data_set(&data->async->rx.timer, (void *)dev); + k_timer_init(&data->async->tx.timer, tx_timeout, NULL); + k_timer_user_data_set(&data->async->tx.timer, (void *)dev); + +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + return cbwt_uarte_async_init(dev); + } +#endif + #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) int ret = uarte_nrfx_rx_counting_init(dev); @@ -1009,11 +1780,6 @@ static int uarte_async_init(const struct device *dev) nrf_uarte_int_enable(uarte, rx_int_mask); - k_timer_init(&data->async->rx.timer, rx_timeout, NULL); - k_timer_user_data_set(&data->async->rx.timer, (void *)dev); - k_timer_init(&data->async->tx.timer, tx_timeout, NULL); - k_timer_user_data_set(&data->async->tx.timer, (void *)dev); - return 0; } @@ -1162,6 +1928,7 @@ static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout) } #endif + static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) @@ -1171,6 +1938,11 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, const struct uarte_nrfx_config *cfg = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \ + defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + bool with_timeout = timeout != SYS_FOREVER_US; +#endif + if (cfg->disable_rx) { __ASSERT(false, "TX only UARTE instance"); return -ENOTSUP; @@ -1185,35 +1957,45 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, } #ifdef CONFIG_HAS_NORDIC_DMM - uint8_t *dma_buf; - int ret = 0; + if (!IS_CBWT(dev)) { + void *dma_buf; + int ret = 0; - ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf); - if (ret < 0) { - return ret; - } + ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, &dma_buf); + if (ret < 0) { + return ret; + } - async_rx->usr_buf = buf; - buf = dma_buf; + async_rx->usr_buf = buf; + buf = dma_buf; + } #endif -#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \ + 
defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + #ifdef UARTE_HAS_FRAME_TIMEOUT - if (timeout != SYS_FOREVER_US) { + if (!IS_CBWT(dev) && with_timeout) { uint32_t baudrate = COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, - (data->uart_config.baudrate), (cfg->baudrate)); - - async_rx->timeout = K_USEC(timeout); + (data->uart_config.baudrate), (cfg->baudrate)); nrf_uarte_frame_timeout_set(uarte, us_to_bauds(baudrate, timeout)); nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX); + } +#endif +#if !defined(UARTE_HAS_FRAME_TIMEOUT) || defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + async_rx->idle_cnt = 0; +#endif + + if (with_timeout) { + if (!IS_CBWT(dev) && IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) { + async_rx->timeout = K_USEC(timeout); + } else { + async_rx->timeout = with_timeout ? + K_USEC(timeout / RX_TIMEOUT_DIV) : K_NO_WAIT; + } } else { async_rx->timeout = K_NO_WAIT; } -#else - async_rx->timeout = (timeout == SYS_FOREVER_US) ? - K_NO_WAIT : K_USEC(timeout / RX_TIMEOUT_DIV); - async_rx->idle_cnt = 0; -#endif /* UARTE_HAS_FRAME_TIMEOUT */ #else async_rx->timeout_us = timeout; async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV; @@ -1241,7 +2023,19 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, } } pm_device_runtime_get(dev); + } else if (LOW_POWER_ENABLED(cfg)) { + unsigned int key = irq_lock(); + + uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX); + irq_unlock(key); + } + +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + cbwt_rx_enable(dev, with_timeout); + return 0; } +#endif if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(cfg)) { if (async_rx->flush_cnt) { @@ -1279,7 +2073,7 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, return 0; } else { #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX - if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) { + if (with_timeout) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT); @@ -1304,13 +2098,6 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, async_rx->enabled = true; - if (LOW_POWER_ENABLED(cfg)) { - unsigned int key = irq_lock(); - - uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX); - irq_unlock(key); - } - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); return 0; @@ -1329,29 +2116,33 @@ static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf, err = -EACCES; } else if (async_rx->next_buf == NULL) { #ifdef CONFIG_HAS_NORDIC_DMM - uint8_t *dma_buf; - const struct uarte_nrfx_config *config = dev->config; + if (!IS_CBWT(dev)) { + uint8_t *dma_buf; + const struct uarte_nrfx_config *config = dev->config; - err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); - if (err < 0) { - return err; + err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); + if (err < 0) { + return err; + } + async_rx->next_usr_buf = buf; + buf = dma_buf; } - async_rx->next_usr_buf = buf; - buf = dma_buf; #endif async_rx->next_buf = buf; async_rx->next_buf_len = len; - nrf_uarte_rx_buffer_set(uarte, buf, len); - /* If buffer is shorter than RX FIFO then there is a risk that due - * to interrupt handling latency ENDRX event is not handled on time - * and due to ENDRX_STARTRX short data will start to be overwritten. - * In that case short is not enabled and ENDRX event handler will - * manually start RX for that buffer. Thanks to RX FIFO there is - * 5 byte time for doing that. 
If interrupt latency is higher and - * there is no HWFC in both cases data will be lost or corrupted. - */ - if (len >= UARTE_HW_RX_FIFO_SIZE) { - nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + if (!IS_CBWT(dev)) { + nrf_uarte_rx_buffer_set(uarte, buf, len); + /* If buffer is shorter than RX FIFO then there is a risk that due + * to interrupt handling latency ENDRX event is not handled on time + * and due to ENDRX_STARTRX short data will start to be overwritten. + * In that case short is not enabled and ENDRX event handler will + * manually start RX for that buffer. Thanks to RX FIFO there is + * 5 byte time for doing that. If interrupt latency is higher and + * there is no HWFC in both cases data will be lost or corrupted. + */ + if (len >= UARTE_HW_RX_FIFO_SIZE) { + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + } } err = 0; } else { @@ -1401,6 +2192,13 @@ static void rx_timeout(struct k_timer *timer) NRF_UARTE_Type *uarte = get_uarte_instance(dev); #ifdef UARTE_HAS_FRAME_TIMEOUT +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + cbwt_rx_timeout(timer); + return; + } +#endif + struct uarte_nrfx_data *data = dev->data; struct uarte_async_rx *async_rx = &data->async->rx; bool rxdrdy = nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY); @@ -1547,7 +2345,7 @@ static void error_isr(const struct device *dev) nrf_uarte_errorsrc_clear(uarte, err); user_callback(dev, &evt); - (void) uarte_nrfx_rx_disable(dev); + (void)rx_disable(dev, false); } static void rxstarted_isr(const struct device *dev) @@ -1766,7 +2564,6 @@ static void rxto_isr(const struct device *dev) * In the second case, additionally, data from the UARTE internal RX * FIFO need to be discarded. */ - async_rx->enabled = false; if (async_rx->discard_fifo) { async_rx->discard_fifo = false; #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) @@ -1794,15 +2591,7 @@ static void rxto_isr(const struct device *dev) #endif nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); #endif - - if (LOW_POWER_ENABLED(config)) { - uint32_t key = irq_lock(); - - uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX); - irq_unlock(key); - } - - notify_rx_disable(dev); + rx_disable_finalize(dev); } static void txstopped_isr(const struct device *dev) @@ -1892,10 +2681,12 @@ static void txstopped_isr(const struct device *dev) static void rxdrdy_isr(const struct device *dev) { -#if !defined(UARTE_HAS_FRAME_TIMEOUT) +#if !defined(UARTE_HAS_FRAME_TIMEOUT) || defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) struct uarte_nrfx_data *data = dev->data; -#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \ + defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + NRF_UARTE_Type *uarte = get_uarte_instance(dev); data->async->rx.idle_cnt = 0; @@ -1927,8 +2718,9 @@ static void uarte_nrfx_isr_async(const void *arg) struct uarte_async_rx *async_rx = &data->async->rx; uint32_t imask = nrf_uarte_int_enable_check(uarte, UINT32_MAX); - if (!(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) - && event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) { + if ((IS_CBWT(dev) || + !(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT))) && + event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) { rxdrdy_isr(dev); } @@ -1956,6 +2748,12 @@ static void uarte_nrfx_isr_async(const void *arg) rxstarted_isr(dev); } +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if 
(IS_CBWT(dev) && + event_check_clear(uarte, NRF_UARTE_EVENT_RXTO, NRF_UARTE_INT_RXTO_MASK, imask)) { + cbwt_rxto_isr(dev, true); + } else +#endif /* RXTO must be handled after ENDRX which should notify the buffer. * Skip if ENDRX is set when RXTO is set. It means that * ENDRX occurred after check for ENDRX in isr which may happen when @@ -1980,7 +2778,8 @@ static void uarte_nrfx_isr_async(const void *arg) txstopped_isr(dev); } - if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) { + if (!IS_CBWT(dev) && + (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO)) { #ifdef CONFIG_HAS_NORDIC_DMM int ret; @@ -1995,7 +2794,7 @@ static void uarte_nrfx_isr_async(const void *arg) rx_buf_release(dev, async_rx->buf); async_rx->buf_len = 0; async_rx->buf = NULL; - notify_rx_disable(dev); + rx_disable_finalize(dev); } } @@ -2608,6 +3407,45 @@ static int uarte_instance_deinit(const struct device *dev) return pm_device_driver_deinit(dev, uarte_nrfx_pm_action); } +#define UARTE_TIMER_REG(idx) (NRF_TIMER_Type *)DT_REG_ADDR(DT_PHANDLE(UARTE(idx), timer)) + +#define UARTE_TIMER_IRQN(idx) DT_IRQN(DT_PHANDLE(UARTE(idx), timer)) + +#define UARTE_TIMER_IRQ_PRIO(idx) DT_IRQ(DT_PHANDLE(UARTE(idx), timer), priority) + +#define UARTE_COUNT_BYTES_WITH_TIMER_CONFIG(idx) \ + IF_ENABLED(UARTE_HAS_PROP(idx, timer), \ + (.timer_regs = UARTE_TIMER_REG(idx), \ + .timer_irqn = UARTE_TIMER_IRQN(idx), \ + .uarte_irqn = DT_IRQN(UARTE(idx)), \ + .bounce_buf = { \ + uart##idx##_bounce_buf, \ + &uart##idx##_bounce_buf[sizeof(uart##idx##_bounce_buf) / 2] \ + }, \ + .bounce_buf_len = sizeof(uart##idx##_bounce_buf) / 2, \ + .bounce_buf_swap_len = UARTE_BUF_SWAP_LEN(sizeof(uart##idx##_bounce_buf) / 2,\ + UARTE_US_TO_BYTES(UARTE_PROP(idx, current_speed))), \ + .cbwt_data = &uart##idx##_bounce_data,)) + +#define UARTE_COUNT_BYTES_WITH_TIMER_VALIDATE_CONFIG(idx) \ + __ASSERT_NO_MSG(UARTE_TIMER_IRQ_PRIO(idx) == DT_IRQ(UARTE(idx), priority)) + +#define UARTE_TIMER_IRQ_CONNECT(idx, func) \ + IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER), \ + UARTE_HAS_PROP(idx, timer)), \ + (UARTE_COUNT_BYTES_WITH_TIMER_VALIDATE_CONFIG(idx); \ + IRQ_CONNECT(UARTE_TIMER_IRQN(idx), UARTE_TIMER_IRQ_PRIO(idx), func, \ + DEVICE_DT_GET(UARTE(idx)), 0); \ + irq_enable(UARTE_TIMER_IRQN(idx));)) + +/* Macro sets flag to indicate that uart use different interrupt priority than the system clock. */ +#define UARTE_HAS_VAR_PRIO(idx) \ + COND_CODE_1(UTIL_AND(IS_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER), \ + UARTE_HAS_PROP(idx, timer)), \ + ((DT_IRQ(UARTE(idx), priority) != DT_IRQ(DT_NODELABEL(grtc), priority)) ? \ + UARTE_CFG_FLAG_VAR_IRQ : 0), (0)) + + #define UARTE_GET_ISR(idx) \ COND_CODE_1(CONFIG_UART_##idx##_ASYNC, (uarte_nrfx_isr_async), (uarte_nrfx_isr_int)) @@ -2623,16 +3461,18 @@ static int uarte_instance_deinit(const struct device *dev) )) /* Depending on configuration standard or direct IRQ is connected. 
*/ -#define UARTE_IRQ_CONNECT(idx, irqn, prio) \ - COND_CODE_1(CONFIG_UART_NRFX_UARTE_NO_IRQ, (), \ - (COND_CODE_1(CONFIG_UART_NRFX_UARTE_DIRECT_ISR, \ - (IRQ_DIRECT_CONNECT(irqn, prio, uarte_##idx##_direct_isr, 0)), \ - (IRQ_CONNECT(irqn, prio, UARTE_GET_ISR(idx), DEVICE_DT_GET(UARTE(idx)), 0))))) +#define UARTE_IRQ_CONNECT(idx, irqn, prio) \ + COND_CODE_1(CONFIG_UART_NRFX_UARTE_NO_IRQ, (), \ + (COND_CODE_1(CONFIG_UART_NRFX_UARTE_DIRECT_ISR, \ + (IRQ_DIRECT_CONNECT(irqn, prio, uarte_##idx##_direct_isr, 0)), \ + (IRQ_CONNECT(irqn, prio, UARTE_GET_ISR(idx), \ + DEVICE_DT_GET(UARTE(idx)), 0))))) #define UARTE_IRQ_CONFIGURE(idx) \ do { \ UARTE_IRQ_CONNECT(idx, DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority)); \ irq_enable(DT_IRQN(UARTE(idx))); \ + UARTE_TIMER_IRQ_CONNECT(idx, timer_isr) \ } while (false) /* Low power mode is used when disable_rx is not defined or in async mode if @@ -2739,6 +3579,12 @@ static int uarte_instance_deinit(const struct device *dev) UARTE_INT_DRIVEN(idx); \ PINCTRL_DT_DEFINE(UARTE(idx)); \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, ( \ + IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER), \ + UARTE_HAS_PROP(idx, timer)), \ + (static uint8_t uart##idx##_bounce_buf[CONFIG_UART_NRFX_UARTE_BOUNCE_BUF_LEN] \ + DMM_MEMORY_SECTION(UARTE(idx)); \ + static struct uarte_async_rx_cbwt uart##idx##_bounce_data; \ + )) \ static uint8_t \ uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \ DMM_MEMORY_SECTION(UARTE(idx)); \ @@ -2780,6 +3626,10 @@ static int uarte_instance_deinit(const struct device *dev) (IS_ENABLED(CONFIG_UART_NRFX_UARTE_SPURIOUS_RXTO_WORKAROUND) && \ INSTANCE_IS_HIGH_SPEED(_, /*empty*/, idx, _) ? \ UARTE_CFG_FLAG_SPURIOUS_RXTO : 0) | \ + ((IS_ENABLED(UARTE_BAUDRATE_RETENTION_WORKAROUND) && \ + UARTE_IS_CACHEABLE(idx)) ? \ + UARTE_CFG_FLAG_VOLATILE_BAUDRATE : 0) | \ + UARTE_HAS_VAR_PRIO(idx) | \ USE_LOW_POWER(idx), \ UARTE_DISABLE_RX_INIT(UARTE(idx)), \ .poll_out_byte = &uarte##idx##_poll_out_byte, \ @@ -2787,6 +3637,8 @@ static int uarte_instance_deinit(const struct device *dev) IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ (.tx_cache = uarte##idx##_tx_cache, \ .rx_flush_buf = uarte##idx##_flush_buf,)) \ + IF_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER, \ + (UARTE_COUNT_BYTES_WITH_TIMER_CONFIG(idx))) \ IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ (.timer = NRFX_TIMER_INSTANCE( \ CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \ @@ -2839,5 +3691,4 @@ static int uarte_instance_deinit(const struct device *dev) #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \ IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);)) - UARTE_FOR_EACH_INSTANCE(COND_UART_NRF_UARTE_DEVICE, (), ()) From 47d93f978196d2dfbc0585784f4fba824df20ff5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Mon, 7 Jul 2025 12:02:24 +0200 Subject: [PATCH 04/13] tests: drivers: uart: async_dual: Add progress report MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add reporting about the test progress. Test lasts few seconds and progress report helps to see if test stuck or how it is progressing. 
Signed-off-by: Krzysztof Chruściński --- tests/drivers/uart/uart_async_dual/src/main.c | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/drivers/uart/uart_async_dual/src/main.c b/tests/drivers/uart/uart_async_dual/src/main.c index 37ce73120acb8..d14e0f88a11e0 100644 --- a/tests/drivers/uart/uart_async_dual/src/main.c +++ b/tests/drivers/uart/uart_async_dual/src/main.c @@ -365,6 +365,26 @@ static void config_baudrate(uint32_t rate) } } +static void report_progress(uint32_t start) +{ + static const uint32_t inc = CONFIG_UART_ASYNC_DUAL_TEST_TIMEOUT / 20; + static uint32_t next; + static uint32_t progress; + + if ((k_uptime_get_32() - start < inc) && progress) { + /* Reset state. */ + next = inc; + progress = 0; + } + + if (k_uptime_get_32() > (start + next)) { + progress += 5; + TC_PRINT("\r%d%%", progress); + next += inc; + } +} + + /* Test is running following scenario. Transmitter is sending packets which * has 1 byte header with length followed by the payload. Transmitter can send * packets in two modes: bulk where data is send in chunks without gaps between @@ -380,6 +400,7 @@ static void var_packet_hwfc(uint32_t baudrate, bool tx_packets, bool cont) { int err; uint32_t load = 0; + uint32_t start = k_uptime_get_32(); config_baudrate(baudrate); @@ -420,8 +441,10 @@ static void var_packet_hwfc(uint32_t baudrate, bool tx_packets, bool cont) while (tx_data.cont || rx_data.cont) { fill_tx(&tx_data); k_msleep(1); + report_progress(start); try_tx(tx_dev, false); } + TC_PRINT("\n"); if (IS_ENABLED(CONFIG_CPU_LOAD)) { load = cpu_load_get(true); @@ -653,6 +676,7 @@ static void hci_like_rx(void) uint8_t len; bool cont; bool explicit_pm = IS_ENABLED(CONFIG_PM_RUNTIME_IN_TEST); + uint32_t start = k_uptime_get_32(); while (1) { if (explicit_pm) { @@ -704,7 +728,9 @@ static void hci_like_rx(void) PM_CHECK(rx_dev, tx_dev, false); check_payload(rx_data.buf, len); + report_progress(start); } + TC_PRINT("\n"); } #define HCI_LIKE_TX_STACK_SIZE 2048 From b0740b26673471fe715257ea7e1f7941ac8d3004 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Mon, 7 Jul 2025 12:02:46 +0200 Subject: [PATCH 05/13] tests: drivers: uart: async_dual: Extend testing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend the test with a mode where HWFC is off and the receiver is providing buffers on time. In that case the receiver should be able to continuously receive data without losing any byte (even without HWFC). Additionally, TX data is chopped to verify that the receiver does not lose bytes when new TX data collides with a detected RX timeout. Signed-off-by: Krzysztof Chruściński --- tests/drivers/uart/uart_async_dual/Kconfig | 8 + tests/drivers/uart/uart_async_dual/src/main.c | 218 ++++++++++++++---- .../uart/uart_async_dual/testcase.yaml | 15 ++ 3 files changed, 191 insertions(+), 50 deletions(-) diff --git a/tests/drivers/uart/uart_async_dual/Kconfig b/tests/drivers/uart/uart_async_dual/Kconfig index 6e80ec3a79578..f0087aa48e3b6 100644 --- a/tests/drivers/uart/uart_async_dual/Kconfig +++ b/tests/drivers/uart/uart_async_dual/Kconfig @@ -16,5 +16,13 @@ config PM_RUNTIME_IN_TEST select PM_DEVICE select PM_DEVICE_RUNTIME +config TEST_CHOPPED_TX + bool "Test chopped TX data" + default y + help + When enabled, test cases that transmit TX packets in random chunks are + performed. Some driver implementations do not support the case when new TX data + collides with handling of the RX timeout.
+ # Include Zephyr's Kconfig source "Kconfig" diff --git a/tests/drivers/uart/uart_async_dual/src/main.c b/tests/drivers/uart/uart_async_dual/src/main.c index d14e0f88a11e0..c2ad5d8a0a0c0 100644 --- a/tests/drivers/uart/uart_async_dual/src/main.c +++ b/tests/drivers/uart/uart_async_dual/src/main.c @@ -28,9 +28,10 @@ LOG_MODULE_REGISTER(test); #endif #define TX_TIMEOUT 100000 -#define RX_TIMEOUT 2000 +#define RX_TIMEOUT_BYTES 50 #define MAX_PACKET_LEN 128 +#define MIN_PACKET_LEN 10 struct dut_data { const struct device *dev; @@ -100,6 +101,7 @@ static const struct device *tx_dev; enum test_tx_mode { TX_BULK, TX_PACKETS, + TX_CHOPPED, }; struct test_tx_data { @@ -111,6 +113,8 @@ struct test_tx_data { volatile bool cont; volatile enum test_tx_mode mode; struct k_sem sem; + uint32_t idx; + uint32_t rx_timeout; }; enum test_rx_state { @@ -121,17 +125,21 @@ enum test_rx_state { enum test_rx_mode { RX_CONT, RX_DIS, + RX_ALL, }; struct test_rx_data { uint8_t hdr[1]; uint8_t buf[256]; uint32_t rx_cnt; + uint32_t payload_idx; enum test_rx_state state; enum test_rx_mode mode; volatile bool cont; bool buf_req; struct k_sem sem; + uint32_t timeout; + uint32_t buf_idx; }; static struct test_tx_data tx_data; @@ -143,8 +151,8 @@ static void fill_tx(struct test_tx_data *data) uint32_t len; int err; - if (data->mode == TX_PACKETS) { - err = k_sem_take(&data->sem, K_MSEC(100)); + if (data->mode != TX_BULK) { + err = k_sem_take(&data->sem, K_MSEC(200)); if (err < 0 && !data->cont) { return; } @@ -153,9 +161,10 @@ static void fill_tx(struct test_tx_data *data) uint8_t len = sys_rand8_get(); len = len % MAX_PACKET_LEN; - len = MAX(2, len); + len = MAX(MIN_PACKET_LEN, len); data->packet_len = len; + data->idx = 0; for (int i = 0; i < len; i++) { data->buf[i] = len - i; } @@ -163,12 +172,11 @@ static void fill_tx(struct test_tx_data *data) return; } - while ((len = ring_buf_put_claim(&data->rbuf, &buf, 255)) > 1) { + while ((len = ring_buf_put_claim(&data->rbuf, &buf, 255)) > 0) { uint8_t r = (sys_rand8_get() % MAX_PACKET_LEN) % len; - uint8_t packet_len = MAX(r, 2); - uint8_t rem = len - packet_len; + uint8_t packet_len = MAX(r, MIN_PACKET_LEN); - packet_len = (rem < 3) ? len : packet_len; + packet_len = (len <= MIN_PACKET_LEN) ? 
len : packet_len; buf[0] = packet_len; for (int i = 1; i < packet_len; i++) { buf[i] = packet_len - i; @@ -189,7 +197,7 @@ static void try_tx(const struct device *dev, bool irq) return; } - if ((tx_data.mode == TX_PACKETS) && (tx_data.packet_len > 0)) { + if (tx_data.mode == TX_PACKETS) { uint8_t len = tx_data.packet_len; tx_data.packet_len = 0; @@ -199,19 +207,50 @@ static void try_tx(const struct device *dev, bool irq) err, irq, tx_data.cont); return; } - zassert_true(tx_data.mode == TX_BULK); - if (!atomic_cas(&tx_data.busy, 0, 1)) { + if (tx_data.mode == TX_BULK) { + if (!atomic_cas(&tx_data.busy, 0, 1)) { + return; + } + + len = ring_buf_get_claim(&tx_data.rbuf, &buf, 255); + if (len > 0) { + err = uart_tx(dev, buf, len, TX_TIMEOUT); + zassert_equal(err, 0, + "Unexpected err:%d irq:%d cont:%d\n", + err, irq, tx_data.cont); + } else { + tx_data.busy = 0; + } return; } - len = ring_buf_get_claim(&tx_data.rbuf, &buf, 255); - if (len > 0) { - err = uart_tx(dev, buf, len, TX_TIMEOUT); - zassert_equal(err, 0, - "Unexpected err:%d irq:%d cont:%d\n", - err, irq, tx_data.cont); + zassert_true(tx_data.mode == TX_CHOPPED); + + uint32_t rem = tx_data.packet_len - tx_data.idx; + + if (tx_data.packet_len > 12) { + len = sys_rand8_get() % (tx_data.packet_len / 4); + } else { + len = 0; } + len = MAX(3, len); + len = MIN(rem, len); + + buf = &tx_data.buf[tx_data.idx]; + tx_data.idx += len; + + err = uart_tx(dev, buf, len, TX_TIMEOUT); + zassert_equal(err, 0, + "Unexpected err:%d irq:%d cont:%d\n", + err, irq, tx_data.cont); +} + +static void tx_backoff(uint32_t rx_timeout) +{ + uint32_t delay = (rx_timeout / 2) + (sys_rand32_get() % rx_timeout); + + k_busy_wait(delay); } static void on_tx_done(const struct device *dev, struct uart_event *evt) @@ -221,6 +260,17 @@ static void on_tx_done(const struct device *dev, struct uart_event *evt) return; } + if (tx_data.mode == TX_CHOPPED) { + if (tx_data.idx == tx_data.packet_len) { + k_sem_give(&tx_data.sem); + } else { + + tx_backoff(tx_data.rx_timeout); + try_tx(dev, true); + } + return; + } + /* Finish previous data chunk and start new if any pending. */ ring_buf_get_finish(&tx_data.rbuf, evt->data.tx.len); atomic_set(&tx_data.busy, 0); @@ -239,7 +289,16 @@ static void on_rx_rdy(const struct device *dev, struct uart_event *evt) rx_data.rx_cnt += evt->data.rx.len; if (evt->data.rx.buf == rx_data.hdr) { + if (rx_data.hdr[0] == 1) { + /* single byte packet. 
*/ + err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + zassert_equal(err, 0); + return; + } + + zassert_equal(rx_data.payload_idx, 0); rx_data.state = RX_PAYLOAD; + rx_data.payload_idx = rx_data.hdr[0] - 1; if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { size_t l = rx_data.hdr[0] - 1; @@ -248,16 +307,19 @@ static void on_rx_rdy(const struct device *dev, struct uart_event *evt) err = uart_rx_buf_rsp(dev, rx_data.buf, rx_data.hdr[0] - 1); } } else { - /* Payload received */ - rx_data.state = RX_HDR; - zassert_equal(len, rx_data.hdr[0] - 1); - for (int i = 0; i < len; i++) { - bool ok = evt->data.rx.buf[off + i] == (uint8_t)(len - i); + bool ok; + + if ((rx_data.mode == RX_ALL) && (rx_data.payload_idx == 0)) { + rx_data.payload_idx = evt->data.rx.buf[off + i]; + ok = true; + } else { + ok = evt->data.rx.buf[off + i] == (uint8_t)rx_data.payload_idx; + } if (!ok) { LOG_ERR("Unexpected data at %d, exp:%02x got:%02x", - i, len - i, evt->data.rx.buf[off + i]); + i, rx_data.payload_idx, evt->data.rx.buf[off + i]); } zassert_true(ok, "Unexpected data at %d, exp:%02x got:%02x", @@ -270,21 +332,52 @@ static void on_rx_rdy(const struct device *dev, struct uart_event *evt) */ return; } - } - if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { - rx_data.buf_req = false; - err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + rx_data.payload_idx--; + if (rx_data.payload_idx == 0) { + if (rx_data.mode != RX_ALL) { + zassert_equal(i + 1, len, "len:%d i:%d", len, i); + } + rx_data.state = RX_HDR; + if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { + rx_data.buf_req = false; + err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + zassert_equal(err, 0); + } + } } } } +static void on_rx_buf_req(const struct device *dev) +{ + if (rx_data.mode != RX_ALL) { + rx_data.buf_req = true; + return; + } + + size_t len = sizeof(rx_data.buf) / 2; + uint8_t *buf = &rx_data.buf[len * rx_data.buf_idx]; + + rx_data.buf_idx = (rx_data.buf_idx + 1) & 0x1; + uart_rx_buf_rsp(dev, buf, len); +} + static void on_rx_dis(const struct device *dev, struct uart_event *evt, void *user_data) { ARG_UNUSED(evt); struct test_rx_data *data = user_data; int err; - uint8_t *buf = (data->state == RX_HDR) ? data->hdr : data->buf; - uint32_t len = (data->state == RX_HDR) ? 1 : (data->hdr[0] - 1); + uint8_t *buf; + uint32_t len; + + if (data->mode == RX_ALL) { + buf = data->buf; + len = sizeof(data->buf) / 2; + } else { + buf = (data->state == RX_HDR) ? data->hdr : data->buf; + len = (data->state == RX_HDR) ? 1 : (data->hdr[0] - 1); + data->buf_idx = 1; + } data->buf_req = false; @@ -294,7 +387,7 @@ static void on_rx_dis(const struct device *dev, struct uart_event *evt, void *us zassert_true(len > 0); - err = uart_rx_enable(dev, buf, len, RX_TIMEOUT); + err = uart_rx_enable(dev, buf, len, data->timeout); zassert_equal(err, 0, "Unexpected err:%d", err); } @@ -330,8 +423,8 @@ static void uart_callback(const struct device *dev, struct uart_event *evt, void zassert_true(dev == rx_dev); break; case UART_RX_BUF_REQUEST: - rx_data.buf_req = true; zassert_true(dev == rx_dev); + on_rx_buf_req(dev); break; case UART_RX_DISABLED: zassert_true(dev == rx_dev); @@ -346,7 +439,7 @@ static void uart_callback(const struct device *dev, struct uart_event *evt, void } } -static void config_baudrate(uint32_t rate) +static void config_baudrate(uint32_t rate, bool hwfc) { struct uart_config config; int err; @@ -354,6 +447,7 @@ static void config_baudrate(uint32_t rate) err = uart_config_get(rx_dev, &config); zassert_equal(err, 0, "Unexpected err:%d", err); + config.flow_ctrl = hwfc ? 
UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE; config.baudrate = rate; err = uart_configure(rx_dev, &config); @@ -396,13 +490,14 @@ static void report_progress(uint32_t start) * * Test has busy simulator running if it is enabled in the configuration. */ -static void var_packet_hwfc(uint32_t baudrate, bool tx_packets, bool cont) +static void var_packet(uint32_t baudrate, enum test_tx_mode tx_mode, + enum test_rx_mode rx_mode, bool hwfc) { int err; uint32_t load = 0; uint32_t start = k_uptime_get_32(); - config_baudrate(baudrate); + config_baudrate(baudrate, hwfc); if (IS_ENABLED(CONFIG_TEST_BUSY_SIM)) { uint32_t active_avg = (baudrate == 1000000) ? 5 : 30; @@ -414,13 +509,15 @@ static void var_packet_hwfc(uint32_t baudrate, bool tx_packets, bool cont) memset(&tx_data, 0, sizeof(tx_data)); memset(&rx_data, 0, sizeof(rx_data)); tx_data.cont = true; - tx_data.mode = tx_packets ? TX_PACKETS : TX_BULK; - k_sem_init(&tx_data.sem, tx_packets ? 1 : 0, 1); + tx_data.mode = tx_mode; + k_sem_init(&tx_data.sem, (tx_mode != TX_BULK) ? 1 : 0, 1); + rx_data.timeout = (RX_TIMEOUT_BYTES * 1000000 * 10) / baudrate; + tx_data.rx_timeout = rx_data.timeout; rx_data.cont = true; rx_data.rx_cnt = 0; rx_data.state = RX_HDR; - rx_data.mode = cont ? RX_CONT : RX_DIS; + rx_data.mode = rx_mode; ring_buf_init(&tx_data.rbuf, sizeof(tx_data.buf), tx_data.buf); @@ -459,62 +556,82 @@ static void var_packet_hwfc(uint32_t baudrate, bool tx_packets, bool cont) /* Flush all TX data that may be already started. */ k_msleep(10); - (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), RX_TIMEOUT); + (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), rx_data.timeout); k_msleep(10); (void)uart_rx_disable(rx_dev); k_msleep(10); TC_PRINT("Received %d bytes for %d ms, CPU load:%d.%d\n", rx_data.rx_cnt, CONFIG_UART_ASYNC_DUAL_TEST_TIMEOUT, load / 10, load % 10); - zassert_true(rx_data.rx_cnt > 1000, "Unexected RX cnt: %d", rx_data.rx_cnt); + zassert_true(rx_data.rx_cnt > 1000, "Unexpected RX cnt: %d", rx_data.rx_cnt); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_dis_hwfc) { /* TX in bulk mode, RX in DIS mode, 115k2 */ - var_packet_hwfc(115200, false, false); + var_packet(115200, TX_BULK, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_cont_hwfc) { /* TX in bulk mode, RX in CONT mode, 115k2 */ - var_packet_hwfc(115200, false, true); + var_packet(115200, TX_BULK, RX_CONT, true); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_dis_hwfc_1m) { /* TX in bulk mode, RX in DIS mode, 1M */ - var_packet_hwfc(1000000, false, false); + var_packet(1000000, TX_BULK, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_cont_hwfc_1m) { /* TX in bulk mode, RX in CONT mode, 1M */ - var_packet_hwfc(1000000, false, true); + var_packet(1000000, TX_BULK, RX_CONT, true); } ZTEST(uart_async_dual, test_var_packets_dis_hwfc) { /* TX in packet mode, RX in DIS mode, 115k2 */ - var_packet_hwfc(115200, true, false); + var_packet(115200, TX_PACKETS, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_cont_hwfc) { /* TX in packet mode, RX in CONT mode, 115k2 */ - var_packet_hwfc(115200, true, true); + var_packet(115200, TX_PACKETS, RX_CONT, true); } ZTEST(uart_async_dual, test_var_packets_dis_hwfc_1m) { /* TX in packet mode, RX in DIS mode, 1M */ - var_packet_hwfc(1000000, true, false); + var_packet(1000000, TX_PACKETS, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_cont_hwfc_1m) { /* TX in packet mode, RX in CONT mode, 1M */ - var_packet_hwfc(1000000, true, true); + var_packet(1000000, 
TX_PACKETS, RX_CONT, true); +} + +ZTEST(uart_async_dual, test_var_packets_chopped_all) +{ + if (!IS_ENABLED(CONFIG_TEST_CHOPPED_TX)) { + ztest_test_skip(); + } + + /* TX in chopped mode, RX in receive ALL mode, 115k2 */ + var_packet(115200, TX_CHOPPED, RX_ALL, false); +} + +ZTEST(uart_async_dual, test_var_packets_chopped_all_1m) +{ + if (!IS_ENABLED(CONFIG_TEST_CHOPPED_TX)) { + ztest_test_skip(); + } + + /* TX in chopped mode, RX in receive ALL mode, 1M */ + var_packet(1000000, TX_CHOPPED, RX_ALL, false); } static void hci_like_callback(const struct device *dev, struct uart_event *evt, void *user_data) @@ -563,7 +680,7 @@ static bool rx(uint8_t *buf, size_t len) { int err; - err = uart_rx_enable(rx_dev, buf, len, RX_TIMEOUT); + err = uart_rx_enable(rx_dev, buf, len, rx_data.timeout); zassert_equal(err, 0, "Unexpected err:%d", err); err = k_sem_take(&rx_data.sem, K_MSEC(100)); @@ -751,7 +868,7 @@ static void hci_like_test(uint32_t baudrate) int err; uint32_t load = 0; - config_baudrate(baudrate); + config_baudrate(baudrate, true); if (IS_ENABLED(CONFIG_TEST_BUSY_SIM)) { uint32_t active_avg = (baudrate == 1000000) ? 10 : 50; @@ -765,6 +882,7 @@ static void hci_like_test(uint32_t baudrate) tx_data.cnt = 0; tx_data.cont = true; rx_data.cont = true; + rx_data.timeout = (RX_TIMEOUT_BYTES * 1000000 * 10) / baudrate; k_sem_init(&tx_data.sem, 1, 1); k_sem_init(&rx_data.sem, 0, 1); @@ -802,7 +920,7 @@ static void hci_like_test(uint32_t baudrate) k_msleep(10); PM_CHECK(tx_dev, rx_dev, false); - (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), RX_TIMEOUT); + (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), rx_data.timeout); k_msleep(1); (void)uart_rx_disable(rx_dev); diff --git a/tests/drivers/uart/uart_async_dual/testcase.yaml b/tests/drivers/uart/uart_async_dual/testcase.yaml index 1a2d811bbfe91..1722d5e5e7d1d 100644 --- a/tests/drivers/uart/uart_async_dual/testcase.yaml +++ b/tests/drivers/uart/uart_async_dual/testcase.yaml @@ -73,3 +73,18 @@ tests: - nrf52_bsim extra_configs: - CONFIG_PM_RUNTIME_IN_TEST=y + drivers.uart.async_dual.no_tx_chopped: + harness: ztest + harness_config: + fixture: uart_loopback + depends_on: gpio + platform_allow: + - nrf54l15dk/nrf54l15/cpuapp + - nrf54h20dk/nrf54h20/cpuapp + - nrf54h20dk/nrf54h20/cpurad + - nrf54h20dk/nrf54h20/cpuppr + - nrf9160dk/nrf9160 + - nrf52_bsim + extra_configs: + - CONFIG_TEST_CHOPPED_TX=n + - CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER=n From f08f526ada78d2592efee8be86ff621204bf7135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Mon, 30 Jun 2025 08:08:26 +0200 Subject: [PATCH 06/13] tests: drivers: uart: async_dual: Update configuration for nrf54l MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add timer property to the uart device under test. Add zephyr,pm-device-runtime-auto to the uart device under test. 
Signed-off-by: Krzysztof Chruściński --- .../boards/nrf54l15dk_nrf54l15_cpuapp.overlay | 1 + .../boards/nrf54l15dk_nrf54l15_cpuapp.overlay | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay b/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay index a1e29cbf0ffcc..d8995e3697117 100644 --- a/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay +++ b/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay @@ -42,6 +42,7 @@ dut: &uart21 { pinctrl-1 = <&uart21_sleep_alt>; pinctrl-names = "default", "sleep"; current-speed = <115200>; + timer = <&timer21>; }; dut2: &uart00 { diff --git a/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay b/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay index 93ad73f3b3738..224399ccaa955 100644 --- a/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay +++ b/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay @@ -38,6 +38,10 @@ }; }; +&timer21 { + status = "reserved"; +}; + dut: &uart21 { status = "okay"; current-speed = <115200>; @@ -45,6 +49,8 @@ dut: &uart21 { pinctrl-1 = <&uart21_sleep>; pinctrl-names = "default", "sleep"; hw-flow-control; + timer = <&timer21>; + zephyr,pm-device-runtime-auto; }; dut_aux: &uart22 { @@ -54,6 +60,7 @@ dut_aux: &uart22 { pinctrl-1 = <&uart22_sleep>; pinctrl-names = "default", "sleep"; hw-flow-control; + zephyr,pm-device-runtime-auto; }; &timer20 { From c427b2cf7c525b799dee19bef581cca0dce03984 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Fri, 4 Jul 2025 13:02:21 +0200 Subject: [PATCH 07/13] tests: drivers: uart: async_dual: Extend nrf54h20dk configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add workaround timer to instances that are used for reception in the test. 
Signed-off-by: Krzysztof Chruściński --- .../boards/nrf54h20dk_nrf54h20_common.dtsi | 22 +++++++++++++++++++ .../boards/nrf54h20dk_nrf54h20_cpuapp.overlay | 12 ++++++++++ 2 files changed, 34 insertions(+) diff --git a/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi b/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi index e651cf5399ed2..1ca0775f629eb 100644 --- a/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi +++ b/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi @@ -60,6 +60,16 @@ }; }; +&timer134 { + status = "reserved"; +}; + +&dppic135 { + owned-channels = <0>; + source-channels = <0>; + status = "okay"; +}; + dut: &uart134 { status = "okay"; current-speed = <115200>; @@ -67,6 +77,7 @@ dut: &uart134 { pinctrl-1 = <&uart134_alt_sleep>; pinctrl-names = "default", "sleep"; hw-flow-control; + timer = <&timer134>; zephyr,pm-device-runtime-auto; }; @@ -80,12 +91,23 @@ dut_aux: &uart137 { zephyr,pm-device-runtime-auto; }; +&dppic120 { + owned-channels = <0>; + source-channels = <0>; + status = "okay"; +}; + +&timer120 { + status = "reserved"; +}; + dut2: &uart120 { pinctrl-0 = <&uart120_default_alt>; pinctrl-1 = <&uart120_sleep_alt>; pinctrl-names = "default", "sleep"; current-speed = <115200>; hw-flow-control; + timer = <&timer120>; zephyr,pm-device-runtime-auto; }; diff --git a/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay b/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay index fcdc838a54e43..65a2c52016e64 100644 --- a/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay +++ b/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay @@ -9,3 +9,15 @@ status = "reserved"; interrupt-parent = <&cpuppr_clic>; }; + +&timer134 { + interrupt-parent = <&cpuppr_clic>; +}; + +&dppic135 { + child-owned-channels = <0>; +}; + +&uart136 { + current-speed = <1000000>; +}; From e2bd012727bda511e1b869f262af75793820a54f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Fri, 11 Jul 2025 12:46:55 +0200 Subject: [PATCH 08/13] tests: drivers: uart: async_dual: Optimize test data handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stress test is executed on CPUs with slow clock (16MHz). Handling of test data in UART_RX_RDY event is optimized to reduce time spent in the interrupt context. Since payload is always a decrementing sequence, fixed array is used to compare memory which allows to use standard memcmp instead of byte by byte comparison. Signed-off-by: Krzysztof Chruściński --- tests/drivers/uart/uart_async_dual/src/main.c | 152 +++++++++++------- 1 file changed, 94 insertions(+), 58 deletions(-) diff --git a/tests/drivers/uart/uart_async_dual/src/main.c b/tests/drivers/uart/uart_async_dual/src/main.c index c2ad5d8a0a0c0..160d8c761040d 100644 --- a/tests/drivers/uart/uart_async_dual/src/main.c +++ b/tests/drivers/uart/uart_async_dual/src/main.c @@ -61,6 +61,25 @@ ZTEST_DMEM struct dut_data duts[] = { #endif }; +/* Array that contains potential payload. It is used to memcmp against incoming packets. 
*/ +static const uint8_t test_buf[256] = { + 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, + 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, + 225, 224, 223, 222, 221, 220, 219, 218, 217, 216, 215, 214, 213, 212, 211, + 210, 209, 208, 207, 206, 205, 204, 203, 202, 201, 200, 199, 198, 197, 196, + 195, 194, 193, 192, 191, 190, 189, 188, 187, 186, 185, 184, 183, 182, 181, + 180, 179, 178, 177, 176, 175, 174, 173, 172, 171, 170, 169, 168, 167, 166, + 165, 164, 163, 162, 161, 160, 159, 158, 157, 156, 155, 154, 153, 152, 151, + 150, 149, 148, 147, 146, 145, 144, 143, 142, 141, 140, 139, 138, 137, 136, + 135, 134, 133, 132, 131, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121, + 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, + 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, + 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, + 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + static void pm_check(const struct device *dev, const struct device *second_dev, bool exp_on, int line) { @@ -128,6 +147,8 @@ enum test_rx_mode { RX_ALL, }; +typedef bool (*test_on_rx_rdy_t)(const struct device *dev, uint8_t *buf, size_t len); + struct test_rx_data { uint8_t hdr[1]; uint8_t buf[256]; @@ -140,6 +161,7 @@ struct test_rx_data { struct k_sem sem; uint32_t timeout; uint32_t buf_idx; + test_on_rx_rdy_t on_rx_rdy; }; static struct test_tx_data tx_data; @@ -277,75 +299,86 @@ static void on_tx_done(const struct device *dev, struct uart_event *evt) try_tx(dev, true); } -static void on_rx_rdy(const struct device *dev, struct uart_event *evt) +static bool on_rx_rdy_rx_all(const struct device *dev, uint8_t *buf, size_t len) { - uint32_t len = evt->data.rx.len; - uint32_t off = evt->data.rx.offset; - int err; + bool ok; - if (!rx_data.cont) { - return; + if (rx_data.payload_idx == 0) { + rx_data.payload_idx = buf[0] - 1; + buf++; + len--; } - rx_data.rx_cnt += evt->data.rx.len; - if (evt->data.rx.buf == rx_data.hdr) { - if (rx_data.hdr[0] == 1) { - /* single byte packet. 
*/ - err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); - zassert_equal(err, 0); - return; + ok = memcmp(buf, &test_buf[256 - rx_data.payload_idx], len) == 0; + rx_data.payload_idx -= len; + + return ok; +} + +static bool on_rx_rdy_hdr(const struct device *dev, uint8_t *buf, size_t len); + +static bool on_rx_rdy_payload(const struct device *dev, uint8_t *buf, size_t len) +{ + bool ok; + int err; + + ok = memcmp(buf, &test_buf[255 - rx_data.payload_idx], len) == 0; + if (!ok) { + for (int i = 0; i < len; i++) { + if (buf[i] != test_buf[255 - rx_data.payload_idx + i]) { + zassert_true(false, "Byte %d expected: %02x got: %02x", + i, buf[i], test_buf[255 - rx_data.payload_idx + i]); + } } + rx_data.cont = false; + tx_data.cont = false; + zassert_true(ok); + return false; + } - zassert_equal(rx_data.payload_idx, 0); - rx_data.state = RX_PAYLOAD; - rx_data.payload_idx = rx_data.hdr[0] - 1; - if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { - size_t l = rx_data.hdr[0] - 1; + rx_data.payload_idx -= len; - zassert_true(l > 0); + if (rx_data.payload_idx == 0) { + rx_data.state = RX_HDR; + rx_data.on_rx_rdy = on_rx_rdy_hdr; + if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { rx_data.buf_req = false; - err = uart_rx_buf_rsp(dev, rx_data.buf, rx_data.hdr[0] - 1); + err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + zassert_equal(err, 0); } - } else { - for (int i = 0; i < len; i++) { - bool ok; + } - if ((rx_data.mode == RX_ALL) && (rx_data.payload_idx == 0)) { - rx_data.payload_idx = evt->data.rx.buf[off + i]; - ok = true; - } else { - ok = evt->data.rx.buf[off + i] == (uint8_t)rx_data.payload_idx; - } + return true; +} - if (!ok) { - LOG_ERR("Unexpected data at %d, exp:%02x got:%02x", - i, rx_data.payload_idx, evt->data.rx.buf[off + i]); - } +static bool on_rx_rdy_hdr(const struct device *dev, uint8_t *buf, size_t len) +{ + int err; - zassert_true(ok, "Unexpected data at %d, exp:%02x got:%02x", - i, len - i, evt->data.rx.buf[off + i]); - if (!ok) { - rx_data.cont = false; - tx_data.cont = false; - /* Avoid flood of errors as we are in the interrupt and ztest - * cannot abort from here. - */ - return; - } - rx_data.payload_idx--; - if (rx_data.payload_idx == 0) { - if (rx_data.mode != RX_ALL) { - zassert_equal(i + 1, len, "len:%d i:%d", len, i); - } - rx_data.state = RX_HDR; - if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { - rx_data.buf_req = false; - err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); - zassert_equal(err, 0); - } - } + zassert_equal(buf, rx_data.hdr); + zassert_equal(len, 1); + if (rx_data.hdr[0] == 1) { + /* single byte packet. 
*/ + if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { + err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + zassert_equal(err, 0); } + return true; } + + zassert_equal(rx_data.payload_idx, 0); + rx_data.on_rx_rdy = on_rx_rdy_payload; + rx_data.payload_idx = rx_data.hdr[0] - 1; + rx_data.state = RX_PAYLOAD; + if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { + size_t l = rx_data.hdr[0] - 1; + + zassert_true(l > 0); + rx_data.buf_req = false; + err = uart_rx_buf_rsp(dev, rx_data.buf, buf[0] - 1); + } + + return true; } static void on_rx_buf_req(const struct device *dev) @@ -385,7 +418,6 @@ static void on_rx_dis(const struct device *dev, struct uart_event *evt, void *us return; } - zassert_true(len > 0); err = uart_rx_enable(dev, buf, len, data->timeout); zassert_equal(err, 0, "Unexpected err:%d", err); @@ -417,7 +449,11 @@ static void uart_callback(const struct device *dev, struct uart_event *evt, void break; case UART_RX_RDY: zassert_true(dev == rx_dev); - on_rx_rdy(dev, evt); + if (rx_data.cont) { + rx_data.on_rx_rdy(dev, &evt->data.rx.buf[evt->data.rx.offset], + evt->data.rx.len); + rx_data.rx_cnt += evt->data.rx.len; + } break; case UART_RX_BUF_RELEASED: zassert_true(dev == rx_dev); @@ -516,7 +552,7 @@ static void var_packet(uint32_t baudrate, enum test_tx_mode tx_mode, tx_data.rx_timeout = rx_data.timeout; rx_data.cont = true; rx_data.rx_cnt = 0; - rx_data.state = RX_HDR; + rx_data.on_rx_rdy = rx_mode == RX_ALL ? on_rx_rdy_rx_all : on_rx_rdy_hdr; rx_data.mode = rx_mode; ring_buf_init(&tx_data.rbuf, sizeof(tx_data.buf), tx_data.buf); From 4fb40ff56ba7a0fea64a43753a2cf24890050e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Bainczyk?= Date: Wed, 1 Oct 2025 11:22:21 +0200 Subject: [PATCH 09/13] modules: hal_nordic: add new errno error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add macro for translating new errno error codes to strings. 
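The helper is a stringification switch over the negated error code, so that e.g. -EINVAL is reported as "EINVAL". A minimal self-contained sketch of the same pattern (names here are illustrative; the function actually added below is nrfx_new_error_string_get()):

#include <errno.h>

#define ERRNO_STR_CASE(sym) case sym: return #sym

static const char *errno_name(int code)
{
	/* Zephyr-style APIs return negative errno values, hence the negation. */
	switch (-code) {
	case 0: return "0";
	ERRNO_STR_CASE(EINVAL);
	ERRNO_STR_CASE(ENOMEM);
	ERRNO_STR_CASE(ETIMEDOUT);
	default: return "unknown";
	}
}

With this, errno_name(-ETIMEDOUT) yields "ETIMEDOUT", which is what the new NRFX_NEW_LOG_ERROR_STRING_GET() macro feeds into the nrfx log messages.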
Signed-off-by: Michał Bainczyk --- modules/hal_nordic/nrfx/nrfx_glue.c | 22 ++++++++++++++++++++++ modules/hal_nordic/nrfx/nrfx_log.h | 10 ++++++++++ 2 files changed, 32 insertions(+) diff --git a/modules/hal_nordic/nrfx/nrfx_glue.c b/modules/hal_nordic/nrfx/nrfx_glue.c index 4e7fc94e11de0..dd972ee039d82 100644 --- a/modules/hal_nordic/nrfx/nrfx_glue.c +++ b/modules/hal_nordic/nrfx/nrfx_glue.c @@ -45,3 +45,25 @@ char const *nrfx_error_string_get(nrfx_err_t code) default: return "unknown"; } } + +char const *nrfx_new_error_string_get(int code) +{ + #define NRFX_NEW_ERROR_STRING_CASE(code) case code: return #code + switch (-code) + { + NRFX_NEW_ERROR_STRING_CASE(0); + NRFX_NEW_ERROR_STRING_CASE(ECANCELED); + NRFX_NEW_ERROR_STRING_CASE(ENOMEM); + NRFX_NEW_ERROR_STRING_CASE(ENOTSUP); + NRFX_NEW_ERROR_STRING_CASE(EINVAL); + NRFX_NEW_ERROR_STRING_CASE(EINPROGRESS); + NRFX_NEW_ERROR_STRING_CASE(E2BIG); + NRFX_NEW_ERROR_STRING_CASE(ETIMEDOUT); + NRFX_NEW_ERROR_STRING_CASE(EPERM); + NRFX_NEW_ERROR_STRING_CASE(EFAULT); + NRFX_NEW_ERROR_STRING_CASE(EACCES); + NRFX_NEW_ERROR_STRING_CASE(EBUSY); + NRFX_NEW_ERROR_STRING_CASE(EALREADY); + default: return "unknown"; + } +} diff --git a/modules/hal_nordic/nrfx/nrfx_log.h b/modules/hal_nordic/nrfx/nrfx_log.h index 682388d7dd16d..973ca672b5384 100644 --- a/modules/hal_nordic/nrfx/nrfx_log.h +++ b/modules/hal_nordic/nrfx/nrfx_log.h @@ -128,6 +128,16 @@ LOG_MODULE_REGISTER(NRFX_MODULE_PREFIX, NRFX_MODULE_LOG_LEVEL); #define NRFX_LOG_ERROR_STRING_GET(error_code) nrfx_error_string_get(error_code) extern char const *nrfx_error_string_get(nrfx_err_t code); +/** + * @brief Macro for getting the textual representation of a given errno error code. + * + * @param[in] error_code Errno error code. + * + * @return String containing the textual representation of the errno error code. + */ +#define NRFX_NEW_LOG_ERROR_STRING_GET(error_code) nrfx_new_error_string_get(error_code) +extern char const *nrfx_new_error_string_get(int code); + /** @} */ #ifdef __cplusplus From ec50e81f811756d104124f8fdd76a1bcb183141e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Bainczyk?= Date: Fri, 3 Oct 2025 14:15:01 +0200 Subject: [PATCH 10/13] modules: hal_nordic: remove TIMER config symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove TIMER config symbols to align to changes in nrfx driver. 
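On the C side there is nothing per-instance left to generate; after this change nrfx_kconfig.h only needs the single switch that already appears in the context of the hunk below:

#ifdef CONFIG_NRFX_TIMER
#define NRFX_TIMER_ENABLED 1
#endif

This is consistent with the NRF_TIMER_INST_GET()-based instance selection used by the later patches in this series, which no longer relies on per-instance NRFX_TIMERn symbols.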
Signed-off-by: Michał Bainczyk --- modules/hal_nordic/nrfx/Kconfig | 130 +------------------------ modules/hal_nordic/nrfx/nrfx_kconfig.h | 78 --------------- 2 files changed, 2 insertions(+), 206 deletions(-) diff --git a/modules/hal_nordic/nrfx/Kconfig b/modules/hal_nordic/nrfx/Kconfig index 0b1882ba7a653..92e895c4a021c 100644 --- a/modules/hal_nordic/nrfx/Kconfig +++ b/modules/hal_nordic/nrfx/Kconfig @@ -279,8 +279,7 @@ config NRFX_MRAMC config NRFX_NFCT bool "NFCT driver" depends on $(dt_nodelabel_exists,nfct) - select NRFX_TIMER4 if SOC_SERIES_NRF52X - select NRFX_TIMER2 if SOC_SERIES_NRF53X + select NRFX_TIMER config NRFX_NVMC bool "NVMC driver" @@ -764,132 +763,7 @@ config NRFX_TEMP depends on $(dt_nodelabel_exists,temp) config NRFX_TIMER - bool - -config NRFX_TIMER0 - bool "TIMER0 driver instance" - depends on $(dt_nodelabel_exists,timer0) - select NRFX_TIMER - -config NRFX_TIMER1 - bool "TIMER1 driver instance" - depends on $(dt_nodelabel_exists,timer1) - select NRFX_TIMER - -config NRFX_TIMER2 - bool "TIMER2 driver instance" - depends on $(dt_nodelabel_exists,timer2) - select NRFX_TIMER - -config NRFX_TIMER3 - bool "TIMER3 driver instance" - depends on $(dt_nodelabel_exists,timer3) - select NRFX_TIMER - -config NRFX_TIMER4 - bool "TIMER4 driver instance" - depends on $(dt_nodelabel_exists,timer4) - select NRFX_TIMER - -config NRFX_TIMER00 - bool "TIMER00 driver instance" - depends on $(dt_nodelabel_exists,timer00) - select NRFX_TIMER - -config NRFX_TIMER10 - bool "TIMER10 driver instance" - depends on $(dt_nodelabel_exists,timer10) - select NRFX_TIMER - -config NRFX_TIMER20 - bool "TIMER20 driver instance" - depends on $(dt_nodelabel_exists,timer20) - select NRFX_TIMER - -config NRFX_TIMER21 - bool "TIMER21 driver instance" - depends on $(dt_nodelabel_exists,timer21) - select NRFX_TIMER - -config NRFX_TIMER22 - bool "TIMER22 driver instance" - depends on $(dt_nodelabel_exists,timer22) - select NRFX_TIMER - -config NRFX_TIMER23 - bool "TIMER23 driver instance" - depends on $(dt_nodelabel_exists,timer23) - select NRFX_TIMER - -config NRFX_TIMER24 - bool "TIMER24 driver instance" - depends on $(dt_nodelabel_exists,timer24) - select NRFX_TIMER - -config NRFX_TIMER020 - bool "TIMER020 driver instance" - depends on $(dt_nodelabel_exists,timer020) - select NRFX_TIMER - -config NRFX_TIMER021 - bool "TIMER021 driver instance" - depends on $(dt_nodelabel_exists,timer021) - select NRFX_TIMER - -config NRFX_TIMER022 - bool "TIMER022 driver instance" - depends on $(dt_nodelabel_exists,timer022) - select NRFX_TIMER - -config NRFX_TIMER120 - bool "TIMER120 driver instance" - depends on $(dt_nodelabel_exists,timer120) - select NRFX_TIMER - -config NRFX_TIMER121 - bool "TIMER121 driver instance" - depends on $(dt_nodelabel_exists,timer121) - select NRFX_TIMER - -config NRFX_TIMER130 - bool "TIMER130 driver instance" - depends on $(dt_nodelabel_exists,timer130) - select NRFX_TIMER - -config NRFX_TIMER131 - bool "TIMER131 driver instance" - depends on $(dt_nodelabel_exists,timer131) - select NRFX_TIMER - -config NRFX_TIMER132 - bool "TIMER132 driver instance" - depends on $(dt_nodelabel_exists,timer132) - select NRFX_TIMER - -config NRFX_TIMER133 - bool "TIMER133 driver instance" - depends on $(dt_nodelabel_exists,timer133) - select NRFX_TIMER - -config NRFX_TIMER134 - bool "TIMER134 driver instance" - depends on $(dt_nodelabel_exists,timer134) - select NRFX_TIMER - -config NRFX_TIMER135 - bool "TIMER135 driver instance" - depends on $(dt_nodelabel_exists,timer135) - select NRFX_TIMER - -config 
NRFX_TIMER136 - bool "TIMER136 driver instance" - depends on $(dt_nodelabel_exists,timer136) - select NRFX_TIMER - -config NRFX_TIMER137 - bool "TIMER137 driver instance" - depends on $(dt_nodelabel_exists,timer137) - select NRFX_TIMER + bool "TIMER driver" config NRFX_TWI bool diff --git a/modules/hal_nordic/nrfx/nrfx_kconfig.h b/modules/hal_nordic/nrfx/nrfx_kconfig.h index 4f0d594a21236..c65a75cd0c661 100644 --- a/modules/hal_nordic/nrfx/nrfx_kconfig.h +++ b/modules/hal_nordic/nrfx/nrfx_kconfig.h @@ -659,84 +659,6 @@ #ifdef CONFIG_NRFX_TIMER #define NRFX_TIMER_ENABLED 1 #endif -#ifdef CONFIG_NRFX_TIMER_LOG -#define NRFX_TIMER_CONFIG_LOG_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER0 -#define NRFX_TIMER0_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER1 -#define NRFX_TIMER1_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER2 -#define NRFX_TIMER2_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER3 -#define NRFX_TIMER3_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER4 -#define NRFX_TIMER4_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER00 -#define NRFX_TIMER00_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER10 -#define NRFX_TIMER10_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER20 -#define NRFX_TIMER20_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER21 -#define NRFX_TIMER21_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER22 -#define NRFX_TIMER22_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER23 -#define NRFX_TIMER23_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER24 -#define NRFX_TIMER24_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER020 -#define NRFX_TIMER020_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER021 -#define NRFX_TIMER021_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER022 -#define NRFX_TIMER022_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER120 -#define NRFX_TIMER120_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER121 -#define NRFX_TIMER121_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER130 -#define NRFX_TIMER130_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER131 -#define NRFX_TIMER131_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER132 -#define NRFX_TIMER132_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER133 -#define NRFX_TIMER133_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER134 -#define NRFX_TIMER134_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER135 -#define NRFX_TIMER135_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER136 -#define NRFX_TIMER136_ENABLED 1 -#endif -#ifdef CONFIG_NRFX_TIMER137 -#define NRFX_TIMER137_ENABLED 1 -#endif #ifdef CONFIG_NRFX_TWI #define NRFX_TWI_ENABLED 1 From d5ec020fca4d614da145b313bd5c9ff99f75cd77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Bainczyk?= Date: Fri, 3 Oct 2025 14:17:11 +0200 Subject: [PATCH 11/13] tests: drivers: uart: remove TIMER config symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove TIMER config symbols to align to changes in nrfx driver. 
Signed-off-by: Michał Bainczyk --- tests/drivers/uart/uart_mix_fifo_poll/testcase.yaml | 2 +- tests/drivers/uart/uart_pm/testcase.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/drivers/uart/uart_mix_fifo_poll/testcase.yaml b/tests/drivers/uart/uart_mix_fifo_poll/testcase.yaml index 0ba5944cca665..d481ed33adcfa 100644 --- a/tests/drivers/uart/uart_mix_fifo_poll/testcase.yaml +++ b/tests/drivers/uart/uart_mix_fifo_poll/testcase.yaml @@ -79,7 +79,7 @@ tests: - CONFIG_UART_0_ENHANCED_POLL_OUT=y - CONFIG_UART_0_NRF_HW_ASYNC=y - CONFIG_UART_0_NRF_HW_ASYNC_TIMER=2 - - CONFIG_NRFX_TIMER2=y + - CONFIG_NRFX_TIMER=y platform_allow: - nrf52840dk/nrf52840 - nrf5340dk/nrf5340/cpuapp diff --git a/tests/drivers/uart/uart_pm/testcase.yaml b/tests/drivers/uart/uart_pm/testcase.yaml index 008eae9d3d7f9..38da5dca48007 100644 --- a/tests/drivers/uart/uart_pm/testcase.yaml +++ b/tests/drivers/uart/uart_pm/testcase.yaml @@ -78,7 +78,7 @@ tests: - CONFIG_UART_0_ASYNC=y - CONFIG_UART_0_NRF_HW_ASYNC=y - CONFIG_UART_0_NRF_HW_ASYNC_TIMER=2 - - CONFIG_NRFX_TIMER2=y + - CONFIG_NRFX_TIMER=y - CONFIG_UART_0_ENHANCED_POLL_OUT=n drivers.uart.pm.async.enhanced_poll: @@ -88,7 +88,7 @@ tests: - CONFIG_UART_0_ASYNC=y - CONFIG_UART_0_NRF_HW_ASYNC=y - CONFIG_UART_0_NRF_HW_ASYNC_TIMER=2 - - CONFIG_NRFX_TIMER2=y + - CONFIG_NRFX_TIMER=y - CONFIG_UART_0_ENHANCED_POLL_OUT=y platform_exclude: - nrf54h20dk/nrf54h20/cpuapp From c8d835d942ff82595475ec2ce9be758b3e78a4da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Bainczyk?= Date: Wed, 1 Oct 2025 15:00:15 +0200 Subject: [PATCH 12/13] samples: usb: uac2: align feedback_nrf to changes in nrfx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Align feedback_nrf to changes in nrfx TIMER driver. 
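In code terms the adjustment, repeated in both samples, is the one shown in the hunks below; summarized:

/* Previously the descriptor was const and built directly from the instance
 * number, which required the matching per-instance NRFX_TIMERn Kconfig symbol:
 *
 *   static const nrfx_timer_t feedback_timer_instance =
 *           NRFX_TIMER_INSTANCE(FEEDBACK_TIMER_INSTANCE_NUMBER);
 *
 * With the updated nrfx TIMER driver the instance is resolved through
 * NRF_TIMER_INST_GET() and the descriptor is no longer const:
 */
static nrfx_timer_t feedback_timer_instance =
	NRFX_TIMER_INSTANCE(NRF_TIMER_INST_GET(FEEDBACK_TIMER_INSTANCE_NUMBER));

The matching board configs switch from the per-instance CONFIG_NRFX_TIMERn=y symbols to the single CONFIG_NRFX_TIMER=y.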
Signed-off-by: Michał Bainczyk --- .../boards/nrf5340dk_nrf5340_cpuapp.conf | 2 +- .../boards/nrf54h20dk_nrf54h20_cpuapp.conf | 2 +- samples/subsys/usb/uac2_explicit_feedback/src/feedback_nrf.c | 4 ++-- .../boards/nrf5340dk_nrf5340_cpuapp.conf | 2 +- .../boards/nrf54h20dk_nrf54h20_cpuapp.conf | 2 +- samples/subsys/usb/uac2_implicit_feedback/src/feedback_nrf.c | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/samples/subsys/usb/uac2_explicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf b/samples/subsys/usb/uac2_explicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf index ce67033381a69..4c7c060177e04 100644 --- a/samples/subsys/usb/uac2_explicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf +++ b/samples/subsys/usb/uac2_explicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf @@ -1,2 +1,2 @@ #Enable timer for asynchronous feedback -CONFIG_NRFX_TIMER2=y +CONFIG_NRFX_TIMER=y diff --git a/samples/subsys/usb/uac2_explicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf b/samples/subsys/usb/uac2_explicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf index d7526fc6ec9a9..ce07cde36568c 100644 --- a/samples/subsys/usb/uac2_explicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf +++ b/samples/subsys/usb/uac2_explicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf @@ -1,4 +1,4 @@ # Enable timer for asynchronous feedback CONFIG_NRFX_GPPI=y -CONFIG_NRFX_TIMER131=y +CONFIG_NRFX_TIMER=y CONFIG_NRFX_GPIOTE130=y diff --git a/samples/subsys/usb/uac2_explicit_feedback/src/feedback_nrf.c b/samples/subsys/usb/uac2_explicit_feedback/src/feedback_nrf.c index 6b7ed239b2683..860048acaf744 100644 --- a/samples/subsys/usb/uac2_explicit_feedback/src/feedback_nrf.c +++ b/samples/subsys/usb/uac2_explicit_feedback/src/feedback_nrf.c @@ -63,8 +63,8 @@ static inline void feedback_target_init(void) static const nrfx_gpiote_t gpiote = NRFX_GPIOTE_INSTANCE(FEEDBACK_GPIOTE_INSTANCE_NUMBER); -static const nrfx_timer_t feedback_timer_instance = - NRFX_TIMER_INSTANCE(FEEDBACK_TIMER_INSTANCE_NUMBER); +static nrfx_timer_t feedback_timer_instance = + NRFX_TIMER_INSTANCE(NRF_TIMER_INST_GET(FEEDBACK_TIMER_INSTANCE_NUMBER)); /* See 5.12.4.2 Feedback in Universal Serial Bus Specification Revision 2.0 for * more information about the feedback. 
There is a direct implementation of the diff --git a/samples/subsys/usb/uac2_implicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf b/samples/subsys/usb/uac2_implicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf index ce67033381a69..4c7c060177e04 100644 --- a/samples/subsys/usb/uac2_implicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf +++ b/samples/subsys/usb/uac2_implicit_feedback/boards/nrf5340dk_nrf5340_cpuapp.conf @@ -1,2 +1,2 @@ #Enable timer for asynchronous feedback -CONFIG_NRFX_TIMER2=y +CONFIG_NRFX_TIMER=y diff --git a/samples/subsys/usb/uac2_implicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf b/samples/subsys/usb/uac2_implicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf index c4e23e5d54fe5..3f9d5d61d283f 100644 --- a/samples/subsys/usb/uac2_implicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf +++ b/samples/subsys/usb/uac2_implicit_feedback/boards/nrf54h20dk_nrf54h20_cpuapp.conf @@ -1,3 +1,3 @@ # Enable timer for asynchronous feedback CONFIG_NRFX_GPPI=y -CONFIG_NRFX_TIMER131=y +CONFIG_NRFX_TIMER=y diff --git a/samples/subsys/usb/uac2_implicit_feedback/src/feedback_nrf.c b/samples/subsys/usb/uac2_implicit_feedback/src/feedback_nrf.c index 9623f22e5662c..9e8c2ac0342ff 100644 --- a/samples/subsys/usb/uac2_implicit_feedback/src/feedback_nrf.c +++ b/samples/subsys/usb/uac2_implicit_feedback/src/feedback_nrf.c @@ -51,8 +51,8 @@ static inline void feedback_target_init(void) #error "Unsupported target" #endif -static const nrfx_timer_t feedback_timer_instance = - NRFX_TIMER_INSTANCE(FEEDBACK_TIMER_INSTANCE_NUMBER); +static nrfx_timer_t feedback_timer_instance = + NRFX_TIMER_INSTANCE(NRF_TIMER_INST_GET(FEEDBACK_TIMER_INSTANCE_NUMBER)); /* While it might be possible to determine I2S FRAMESTART to USB SOF offset * entirely in software, the I2S API lacks appropriate timestamping. Therefore From 311ea510cd43c6f3db15ff9efde8abb150166104 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Bainczyk?= Date: Wed, 1 Oct 2025 15:02:08 +0200 Subject: [PATCH 13/13] drivers: serial: uart_nrfx_uarte: align to changes in nrfx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Align driver to changes in nrfx UARTE driver. 
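The driver-side shape of the change is that the HW-async byte-counting timer handle moves from the read-only per-instance config into the mutable per-instance data and is constructed through NRF_TIMER_INST_GET(). A trimmed sketch (only the timer member is shown; instance and variable names are abbreviated for illustration):

struct uarte_nrfx_data {
	/* ... */
	nrfx_timer_t timer; /* previously a member of the const uarte_nrfx_config */
};

/* Per-instance initialization, e.g. for instance 0 with UART_0_NRF_HW_ASYNC=y: */
static struct uarte_nrfx_data uarte0_data = {
	.timer = NRFX_TIMER_INSTANCE(NRF_TIMER_INST_GET(CONFIG_UART_0_NRF_HW_ASYNC_TIMER)),
};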
Signed-off-by: Michał Bainczyk --- drivers/serial/Kconfig.nrfx | 30 +++++++++--------------------- drivers/serial/uart_nrfx_uarte.c | 26 +++++++++++++------------- 2 files changed, 22 insertions(+), 34 deletions(-) diff --git a/drivers/serial/Kconfig.nrfx b/drivers/serial/Kconfig.nrfx index 03e06bd721085..e0eac387c876e 100644 --- a/drivers/serial/Kconfig.nrfx +++ b/drivers/serial/Kconfig.nrfx @@ -214,37 +214,25 @@ nrfx_uart_num = 137 rsource "Kconfig.nrfx_uart_instance" endif -config NRFX_TIMER0 +config NRFX_TIMER default y depends on UART_0_NRF_HW_ASYNC_TIMER = 0 \ || UART_1_NRF_HW_ASYNC_TIMER = 0 \ || UART_2_NRF_HW_ASYNC_TIMER = 0 \ - || UART_3_NRF_HW_ASYNC_TIMER = 0 - -config NRFX_TIMER1 - default y - depends on UART_0_NRF_HW_ASYNC_TIMER = 1 \ + || UART_3_NRF_HW_ASYNC_TIMER = 0 \ + || UART_0_NRF_HW_ASYNC_TIMER = 1 \ || UART_1_NRF_HW_ASYNC_TIMER = 1 \ || UART_2_NRF_HW_ASYNC_TIMER = 1 \ - || UART_3_NRF_HW_ASYNC_TIMER = 1 - -config NRFX_TIMER2 - default y - depends on UART_0_NRF_HW_ASYNC_TIMER = 2 \ + || UART_3_NRF_HW_ASYNC_TIMER = 1 \ + || UART_0_NRF_HW_ASYNC_TIMER = 2 \ || UART_1_NRF_HW_ASYNC_TIMER = 2 \ || UART_2_NRF_HW_ASYNC_TIMER = 2 \ - || UART_3_NRF_HW_ASYNC_TIMER = 2 - -config NRFX_TIMER3 - default y - depends on UART_0_NRF_HW_ASYNC_TIMER = 3 \ + || UART_3_NRF_HW_ASYNC_TIMER = 2 \ + || UART_0_NRF_HW_ASYNC_TIMER = 3 \ || UART_1_NRF_HW_ASYNC_TIMER = 3 \ || UART_2_NRF_HW_ASYNC_TIMER = 3 \ - || UART_3_NRF_HW_ASYNC_TIMER = 3 - -config NRFX_TIMER4 - default y - depends on UART_0_NRF_HW_ASYNC_TIMER = 4 \ + || UART_3_NRF_HW_ASYNC_TIMER = 3 \ + || UART_0_NRF_HW_ASYNC_TIMER = 4 \ || UART_1_NRF_HW_ASYNC_TIMER = 4 \ || UART_2_NRF_HW_ASYNC_TIMER = 4 \ || UART_3_NRF_HW_ASYNC_TIMER = 4 diff --git a/drivers/serial/uart_nrfx_uarte.c b/drivers/serial/uart_nrfx_uarte.c index 90247dfecc5c6..ad9afdafd44a0 100644 --- a/drivers/serial/uart_nrfx_uarte.c +++ b/drivers/serial/uart_nrfx_uarte.c @@ -306,6 +306,7 @@ struct uarte_nrfx_data { #endif #ifdef UARTE_ANY_ASYNC struct uarte_async_cb *async; + nrfx_timer_t timer; #endif atomic_val_t poll_out_lock; atomic_t flags; @@ -448,7 +449,6 @@ struct uarte_nrfx_config { size_t bounce_buf_swap_len; struct uarte_async_rx_cbwt *cbwt_data; #endif - nrfx_timer_t timer; uint8_t *tx_cache; uint8_t *rx_flush_buf; #endif @@ -521,7 +521,7 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask) #if defined(UARTE_ANY_ASYNC) && !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) if (data->async && HW_RX_COUNTING_ENABLED(config)) { - nrfx_timer_disable(&config->timer); + nrfx_timer_disable(&data->timer); /* Timer/counter value is reset when disabled. 
*/ data->async->rx.total_byte_cnt = 0; data->async->rx.total_user_byte_cnt = 0; @@ -878,7 +878,7 @@ static void uarte_periph_enable(const struct device *dev) #ifdef UARTE_ANY_ASYNC if (data->async) { if (HW_RX_COUNTING_ENABLED(config)) { - const nrfx_timer_t *timer = &config->timer; + nrfx_timer_t *timer = &data->timer; nrfx_timer_enable(timer); @@ -1067,13 +1067,13 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) if (HW_RX_COUNTING_ENABLED(cfg)) { nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG( - NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg)); + NRF_TIMER_BASE_FREQUENCY_GET(data->timer.p_reg)); uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY); - uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT); + uint32_t tsk_addr = nrfx_timer_task_address_get(&data->timer, NRF_TIMER_TASK_COUNT); tmr_config.mode = NRF_TIMER_MODE_COUNTER; tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32; - ret = nrfx_timer_init(&cfg->timer, + ret = nrfx_timer_init(&data->timer, &tmr_config, timer_handler); if (ret != NRFX_SUCCESS) { @@ -1081,12 +1081,12 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) return -EINVAL; } - nrfx_timer_clear(&cfg->timer); + nrfx_timer_clear(&data->timer); ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); - nrfx_timer_uninit(&cfg->timer); + nrfx_timer_uninit(&data->timer); return -EINVAL; } @@ -2258,7 +2258,7 @@ static void rx_timeout(struct k_timer *timer) NRF_UARTE_INT_ENDRX_MASK); if (HW_RX_COUNTING_ENABLED(cfg)) { - read = nrfx_timer_capture(&cfg->timer, 0); + read = nrfx_timer_capture(&data->timer, 0); } else { read = async_rx->cnt.cnt; } @@ -3241,7 +3241,7 @@ static void uarte_pm_suspend(const struct device *dev) #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) if (data->async && HW_RX_COUNTING_ENABLED(cfg)) { - nrfx_timer_disable(&cfg->timer); + nrfx_timer_disable(&data->timer); /* Timer/counter value is reset when disabled. */ data->async->rx.total_byte_cnt = 0; data->async->rx.total_user_byte_cnt = 0; @@ -3598,6 +3598,9 @@ static int uarte_instance_deinit(const struct device *dev) (.uart_config = UARTE_CONFIG(idx),)) \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ (.async = &uarte##idx##_async,)) \ + IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ + (.timer = NRFX_TIMER_INSTANCE(NRF_TIMER_INST_GET( \ + CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER)),)) \ IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \ (.int_driven = &uarte##idx##_int_driven,)) \ }; \ @@ -3639,9 +3642,6 @@ static int uarte_instance_deinit(const struct device *dev) .rx_flush_buf = uarte##idx##_flush_buf,)) \ IF_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER, \ (UARTE_COUNT_BYTES_WITH_TIMER_CONFIG(idx))) \ - IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ - (.timer = NRFX_TIMER_INSTANCE( \ - CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \ IF_ENABLED(INSTANCE_IS_FAST(_, /*empty*/, idx, _), \ (.clk_dev = DEVICE_DT_GET_OR_NULL(DT_CLOCKS_CTLR(UARTE(idx))), \ .clk_spec = { \