@@ -160,6 +160,7 @@ struct uarte_async_rx {
 	/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
 	volatile bool is_in_irq;
 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
+	uint16_t prev_amount;
 	uint8_t flush_cnt;
 	volatile bool enabled;
 	volatile bool discard_fifo;
@@ -656,6 +657,104 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
 }
 
 #if defined(UARTE_ANY_ASYNC)
+
+/* Prepare for flushing the internal RX FIFO. The actual flush is performed
+ * only if the UARTE is about to be disabled. For example, with runtime device
+ * PM the UARTE may stay enabled and no flush is performed. The preparation is
+ * harmless, so another RX transfer can always be started afterwards.
+ *
+ * Note that the UARTE does not update the RXAMOUNT register when the FIFO is
+ * empty; the old value remains. In certain cases this makes it impossible to
+ * tell whether the FIFO was empty or not. The flushing code minimizes the
+ * chance of error with the following measures:
+ * - RXAMOUNT is read before flushing and compared with the value read after
+ *   flushing; if they differ, data was flushed,
+ * - the flush buffer is filled with a dirty marker and, if RXAMOUNT did not
+ *   change, the buffer is checked; if it is no longer dirty, data was flushed.
+ *
+ * Otherwise the FIFO is reported as empty. This means that if the number of
+ * bytes in the FIFO equals the last RX transfer length and the data equals
+ * the dirty marker, it will be discarded.
+ *
+ * @param dev Device.
+ */
+static void rx_flush_prepare(const struct device *dev)
+{
+	/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo */
+	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	const struct uarte_nrfx_config *config = dev->config;
+	struct uarte_nrfx_data *data = dev->data;
+
+	if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
+		memset(config->rx_flush_buf, dirty, UARTE_HW_RX_FIFO_SIZE);
+		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
+			sys_cache_data_flush_range(config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+		}
+	}
695+
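+	/* The RXAMOUNT value read here is compared with the value read after the
+	 * flush in rx_flush(); a non-zero value also marks that a flush is pending.
+	 */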
+	data->async->rx.prev_amount = nrf_uarte_rx_amount_get(uarte);
+	nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+	/* ENDRX must not be pending at this point; it is generated later as a
+	 * result of the FLUSHRX task triggered in rx_flush().
+	 */
+	__ASSERT_NO_MSG(!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX));
+}
+
+/** @brief Perform the actual RX FIFO flush.
+ *
+ * rx_flush_prepare() must have been called beforehand.
+ *
+ * @param dev Device.
+ *
+ * @return Number of bytes flushed from the FIFO.
+ */
+static uint8_t rx_flush(const struct device *dev)
+{
+	/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo */
+	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	const struct uarte_nrfx_config *config = dev->config;
+	struct uarte_nrfx_data *data = dev->data;
+	uint32_t rx_amount;
+
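+	/* FLUSHRX copies the FIFO content into the buffer set up by
+	 * rx_flush_prepare() and generates ENDRX when it completes.
+	 */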
+	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
+	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
+		/* empty */
+	}
+	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
+	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
+
+	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
+		sys_cache_data_invd_range(config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+	}
+
+	rx_amount = nrf_uarte_rx_amount_get(uarte);
+	if (!IS_ENABLED(RX_FLUSH_WORKAROUND)) {
+		return rx_amount;
+	}
+
+	if (rx_amount != data->async->rx.prev_amount) {
+		return rx_amount;
+	}
+
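+	/* A value larger than the HW FIFO size must be stale data from an earlier
+	 * transfer, so nothing was flushed.
+	 */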
+	if (rx_amount > UARTE_HW_RX_FIFO_SIZE) {
+		return 0;
+	}
+
+	/* If event is set that means it came after ENDRX so there was at least
+	 * one byte in the RX FIFO.
+	 */
+	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
+		return rx_amount;
+	}
+
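+	/* RXAMOUNT did not change and RXDRDY is clear; as a last resort check
+	 * whether the dirty marker written in rx_flush_prepare() was overwritten.
+	 */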
+	for (int i = 0; i < rx_amount; i++) {
+		if (config->rx_flush_buf[i] != dirty) {
+			return rx_amount;
+		}
+	}
+
+	return 0;
+}
 /** @brief Disable UARTE peripheral is not used by RX or TX.
  *
  * It must be called with interrupts locked so that deciding if no direction is
@@ -676,6 +775,9 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, ui
 		return;
 	}
 
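+	/* Complete a prepared RX FIFO flush before the peripheral gets disabled
+	 * so that data remaining in the FIFO is accounted for.
+	 */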
+	if (data->async && data->async->rx.prev_amount) {
+		data->async->rx.flush_cnt = rx_flush(dev);
+	}
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 	const struct uarte_nrfx_config *config = dev->config;
 
@@ -689,9 +791,6 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, ui
 
 	nrf_uarte_disable(get_uarte_instance(dev));
 }
-#endif
-
-#ifdef UARTE_ANY_ASYNC
 
 static void rx_timeout(struct k_timer *timer);
 static void tx_timeout(struct k_timer *timer);
@@ -1432,93 +1531,6 @@ static void endrx_isr(const struct device *dev)
 #endif
 }
 
-/* Function for flushing internal RX fifo. Function can be called in case
- * flushed data is discarded or when data is valid and needs to be retrieved.
- *
- * However, UARTE does not update RXAMOUNT register if fifo is empty. Old value
- * remains. In certain cases it makes it impossible to distinguish between
- * case when fifo was empty and not. Function is trying to minimize chances of
- * error with following measures:
- * - RXAMOUNT is read before flushing and compared against value after flushing
- *   if they differ it indicates that data was flushed
- * - user buffer is dirtied and if RXAMOUNT did not changed it is checked if
- *   it is still dirty. If not then it indicates that data was flushed
- *
- * In other cases function indicates that fifo was empty. It means that if
- * number of bytes in the fifo equal last rx transfer length and data is equal
- * to dirty marker it will be discarded.
- *
- * @param dev Device.
- * @param buf Buffer for flushed data, null indicates that flushed data can be
- *            dropped but we still want to get amount of data flushed.
- * @param len Buffer size, not used if @p buf is null.
- *
- * @return number of bytes flushed from the fifo.
- */
-
-static uint8_t rx_flush(const struct device *dev, uint8_t *buf)
-{
-	/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo */
-	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
-	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
-	const struct uarte_nrfx_config *config = dev->config;
-	uint32_t prev_rx_amount;
-	uint32_t rx_amount;
-
-	if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
-		memset(buf, dirty, UARTE_HW_RX_FIFO_SIZE);
-		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
-			sys_cache_data_flush_range(buf, UARTE_HW_RX_FIFO_SIZE);
-		}
-		prev_rx_amount = nrf_uarte_rx_amount_get(uarte);
-	} else {
-		prev_rx_amount = 0;
-	}
-
-	nrf_uarte_rx_buffer_set(uarte, buf, UARTE_HW_RX_FIFO_SIZE);
-	/* Final part of handling RXTO event is in ENDRX interrupt
-	 * handler. ENDRX is generated as a result of FLUSHRX task.
-	 */
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
-	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
-	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
-		/* empty */
-	}
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
-
-	rx_amount = nrf_uarte_rx_amount_get(uarte);
-	if (!buf || !IS_ENABLED(RX_FLUSH_WORKAROUND)) {
-		return rx_amount;
-	}
-
-	if (rx_amount != prev_rx_amount) {
-		return rx_amount;
-	}
-
-	if (rx_amount > UARTE_HW_RX_FIFO_SIZE) {
-		return 0;
-	}
-
-	/* If event is set that means it came after ENDRX so there was at least
-	 * one byte in the RX FIFO.
-	 */
-	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
-		return rx_amount;
-	}
-
-	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
-		sys_cache_data_invd_range(buf, UARTE_HW_RX_FIFO_SIZE);
-	}
-
-	for (int i = 0; i < rx_amount; i++) {
-		if (buf[i] != dirty) {
-			return rx_amount;
-		}
-	}
-
-	return 0;
-}
 
 /* This handler is called when the receiver is stopped. If rx was aborted
  * data from fifo is flushed.
@@ -1551,17 +1563,16 @@ static void rxto_isr(const struct device *dev)
 		async_rx->discard_fifo = false;
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 		if (HW_RX_COUNTING_ENABLED(config)) {
-			uint8_t buf[UARTE_HW_RX_FIFO_SIZE];
-
 			/* It need to be included because TIMER+PPI got RXDRDY events
 			 * and counted those flushed bytes.
 			 */
-			async_rx->total_user_byte_cnt += rx_flush(dev, buf);
-			(void)buf;
+			rx_flush_prepare(dev);
+			async_rx->total_user_byte_cnt += rx_flush(dev);
 		}
+		async_rx->prev_amount = 0;
 #endif
 	} else {
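+		/* Only prepare the flush here; the actual flush is deferred to
+		 * rx_flush(), called when the UARTE is about to be disabled.
+		 */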
-		async_rx->flush_cnt = rx_flush(dev, config->rx_flush_buf);
+		rx_flush_prepare(dev);
 	}
 
 #ifdef UARTE_HAS_FRAME_TIMEOUT
@@ -2173,6 +2184,9 @@ static void uarte_pm_suspend(const struct device *dev)
 	(void)data;
 #ifdef UARTE_ANY_ASYNC
 	if (data->async) {
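+		/* Complete a prepared RX FIFO flush before suspending so that bytes
+		 * left in the HW FIFO are accounted for.
+		 */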
+		if (data->async->rx.prev_amount) {
+			data->async->rx.flush_cnt = rx_flush(dev);
+		}
 		/* Entering inactive state requires device to be no
 		 * active asynchronous calls.
 		 */