@@ -166,6 +166,7 @@ struct uarte_async_rx {
 	/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
 	volatile bool is_in_irq;
 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
+	uint16_t prev_amount;
 	uint8_t flush_cnt;
 	volatile bool enabled;
 	volatile bool discard_fifo;
@@ -659,6 +660,104 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
 }
 
 #if defined(UARTE_ANY_ASYNC)
+
+/* Function preparing the flush of the internal RX FIFO. The actual flush is
+ * performed only if the UARTE is going to be disabled. For example, when runtime
+ * device PM is used the UARTE may be kept enabled and no flush is performed.
+ * Preparation is harmless, so it is always possible to simply start another RX
+ * afterwards.
+ *
+ * However, the UARTE does not update the RXAMOUNT register if the FIFO is empty;
+ * the old value remains. In certain cases this makes it impossible to distinguish
+ * an empty FIFO from a non-empty one. The function tries to minimize the chance
+ * of error with the following measures:
+ * - RXAMOUNT is read before flushing and compared against the value read after
+ *   flushing; if they differ, data was flushed.
+ * - The flush buffer is dirtied and, if RXAMOUNT did not change, the buffer is
+ *   checked for the dirty marker. If the marker is gone, data was flushed.
+ *
+ * In all other cases the function reports an empty FIFO. This means that if the
+ * number of bytes in the FIFO equals the last RX transfer length and the data
+ * equals the dirty marker, the data will be discarded.
+ *
+ * @param dev Device.
+ */
+static void rx_flush_prepare(const struct device *dev)
+{
+	/* Flushing the RX FIFO requires a buffer bigger than 4 bytes to empty the FIFO. */
+	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	const struct uarte_nrfx_config *config = dev->config;
+	struct uarte_nrfx_data *data = dev->data;
+
+	if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
+		memset(config->rx_flush_buf, dirty, UARTE_HW_RX_FIFO_SIZE);
+		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
+			sys_cache_data_flush_range(config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+		}
+	}
+
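+	/* Snapshot RXAMOUNT; rx_flush() compares the value read after FLUSHRX against
+	 * this one to detect whether any bytes were actually flushed.
+	 */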
+	data->async->rx.prev_amount = nrf_uarte_rx_amount_get(uarte);
+	nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+	/* Final part of handling the RXTO event is in the ENDRX interrupt
+	 * handler. ENDRX is generated as a result of the FLUSHRX task.
+	 */
+	__ASSERT_NO_MSG(!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX));
+}
+
+/** @brief Actual RX FIFO flushing.
+ *
+ * @param dev Device.
+ *
+ * @return Number of bytes flushed from the FIFO.
+ */
+static uint8_t rx_flush(const struct device *dev)
+{
+	/* Flushing the RX FIFO requires a buffer bigger than 4 bytes to empty the FIFO. */
+	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	const struct uarte_nrfx_config *config = dev->config;
+	struct uarte_nrfx_data *data = dev->data;
+	uint32_t rx_amount;
+
+	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
+	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
+		/* empty */
+	}
+	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
+	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
+
+	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
+		sys_cache_data_invd_range(config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+	}
+
+	rx_amount = nrf_uarte_rx_amount_get(uarte);
+	if (!IS_ENABLED(RX_FLUSH_WORKAROUND)) {
+		return rx_amount;
+	}
+
+	if (rx_amount != data->async->rx.prev_amount) {
+		return rx_amount;
+	}
+
+	if (rx_amount > UARTE_HW_RX_FIFO_SIZE) {
+		return 0;
+	}
+
+	/* If the RXDRDY event is set it came after ENDRX, so there was at least
+	 * one byte in the RX FIFO.
+	 */
+	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
+		return rx_amount;
+	}
+
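+	/* RXAMOUNT did not change and no RXDRDY was latched: check whether the dirty
+	 * marker written by rx_flush_prepare() is still intact. If it is, assume the
+	 * FIFO was empty.
+	 */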
+	for (int i = 0; i < rx_amount; i++) {
+		if (config->rx_flush_buf[i] != dirty) {
+			return rx_amount;
+		}
+	}
+
+	return 0;
+}
 /** @brief Disable UARTE peripheral if not used by RX or TX.
  *
  * It must be called with interrupts locked so that deciding if no direction is
@@ -679,6 +778,9 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, ui
 		return;
 	}
 
+	if (data->async && data->async->rx.prev_amount) {
+		data->async->rx.flush_cnt = rx_flush(dev);
+	}
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 	const struct uarte_nrfx_config *config = dev->config;
 
@@ -692,9 +794,6 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, ui
 
 	nrf_uarte_disable(get_uarte_instance(dev));
 }
-#endif
-
-#ifdef UARTE_ANY_ASYNC
 
 static void rx_timeout(struct k_timer *timer);
 static void tx_timeout(struct k_timer *timer);
@@ -1435,93 +1534,6 @@ static void endrx_isr(const struct device *dev)
 #endif
 }
 
-/* Function for flushing internal RX fifo. Function can be called in case
- * flushed data is discarded or when data is valid and needs to be retrieved.
- *
- * However, UARTE does not update RXAMOUNT register if fifo is empty. Old value
- * remains. In certain cases it makes it impossible to distinguish between
- * case when fifo was empty and not. Function is trying to minimize chances of
- * error with following measures:
- * - RXAMOUNT is read before flushing and compared against value after flushing
- *   if they differ it indicates that data was flushed
- * - user buffer is dirtied and if RXAMOUNT did not changed it is checked if
- *   it is still dirty. If not then it indicates that data was flushed
- *
- * In other cases function indicates that fifo was empty. It means that if
- * number of bytes in the fifo equal last rx transfer length and data is equal
- * to dirty marker it will be discarded.
- *
- * @param dev Device.
- * @param buf Buffer for flushed data, null indicates that flushed data can be
- *            dropped but we still want to get amount of data flushed.
- * @param len Buffer size, not used if @p buf is null.
- *
- * @return number of bytes flushed from the fifo.
- */
-
-static uint8_t rx_flush(const struct device *dev, uint8_t *buf)
-{
-	/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo*/
-	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
-	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
-	const struct uarte_nrfx_config *config = dev->config;
-	uint32_t prev_rx_amount;
-	uint32_t rx_amount;
-
-	if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
-		memset(buf, dirty, UARTE_HW_RX_FIFO_SIZE);
-		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
-			sys_cache_data_flush_range(buf, UARTE_HW_RX_FIFO_SIZE);
-		}
-		prev_rx_amount = nrf_uarte_rx_amount_get(uarte);
-	} else {
-		prev_rx_amount = 0;
-	}
-
-	nrf_uarte_rx_buffer_set(uarte, buf, UARTE_HW_RX_FIFO_SIZE);
-	/* Final part of handling RXTO event is in ENDRX interrupt
-	 * handler. ENDRX is generated as a result of FLUSHRX task.
-	 */
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
-	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
-	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
-		/* empty */
-	}
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
-
-	rx_amount = nrf_uarte_rx_amount_get(uarte);
-	if (!buf || !IS_ENABLED(RX_FLUSH_WORKAROUND)) {
-		return rx_amount;
-	}
-
-	if (rx_amount != prev_rx_amount) {
-		return rx_amount;
-	}
-
-	if (rx_amount > UARTE_HW_RX_FIFO_SIZE) {
-		return 0;
-	}
-
-	/* If event is set that means it came after ENDRX so there was at least
-	 * one byte in the RX FIFO.
-	 */
-	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
-		return rx_amount;
-	}
-
-	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
-		sys_cache_data_invd_range(buf, UARTE_HW_RX_FIFO_SIZE);
-	}
-
-	for (int i = 0; i < rx_amount; i++) {
-		if (buf[i] != dirty) {
-			return rx_amount;
-		}
-	}
-
-	return 0;
-}
 
 /* This handler is called when the receiver is stopped. If rx was aborted
  * data from fifo is flushed.
@@ -1554,17 +1566,16 @@ static void rxto_isr(const struct device *dev)
 		async_rx->discard_fifo = false;
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 		if (HW_RX_COUNTING_ENABLED(config)) {
-			uint8_t buf[UARTE_HW_RX_FIFO_SIZE];
-
 			/* It needs to be included because TIMER+PPI got RXDRDY events
 			 * and counted those flushed bytes.
 			 */
-			async_rx->total_user_byte_cnt += rx_flush(dev, buf);
-			(void)buf;
+			rx_flush_prepare(dev);
+			async_rx->total_user_byte_cnt += rx_flush(dev);
 		}
+		async_rx->prev_amount = 0;
 #endif
 	} else {
-		async_rx->flush_cnt = rx_flush(dev, config->rx_flush_buf);
+		rx_flush_prepare(dev);
 	}
 
 #ifdef UARTE_HAS_FRAME_TIMEOUT
@@ -2178,6 +2189,9 @@ static void uarte_pm_suspend(const struct device *dev)
 	(void)data;
 #ifdef UARTE_ANY_ASYNC
 	if (data->async) {
+		if (data->async->rx.prev_amount) {
+			data->async->rx.flush_cnt = rx_flush(dev);
+		}
 		/* Entering the inactive state requires the device to have no
 		 * active asynchronous calls.
 		 */
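
The hunks above split the old rx_flush(dev, buf) into a prepare step done in the RXTO interrupt and a deferred flush done only when the peripheral is actually disabled. Below is a minimal sketch of that call flow, condensed from the diff; rx_flush_prepare(), rx_flush(), prev_amount and flush_cnt match the driver code above, while the two wrapper functions are illustrative only and do not exist in the driver.

/* Illustrative sketch only, not literal driver code. */

/* RXTO handling: dirty the flush buffer and snapshot RXAMOUNT into
 * async->rx.prev_amount, but do not trigger FLUSHRX yet. Another RX can
 * still be started afterwards without harm.
 */
static void on_rxto(const struct device *dev)
{
	rx_flush_prepare(dev);
}

/* Only when the UARTE is really being disabled (uarte_disable_locked() or
 * uarte_pm_suspend()) is the prepared flush completed and the number of
 * flushed bytes stored in flush_cnt.
 */
static void on_disable(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;

	if (data->async && data->async->rx.prev_amount) {
		data->async->rx.flush_cnt = rx_flush(dev);
	}
}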