@@ -160,6 +160,7 @@ struct uarte_async_rx {
 	/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
 	volatile bool is_in_irq;
 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
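+	/* RXAMOUNT read in rx_flush_prepare(), before the FIFO is flushed. Used by
+	 * rx_flush() to detect whether data was actually flushed and to indicate
+	 * that a flush has been prepared.
+	 */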
+	uint16_t prev_amount;
 	uint8_t flush_cnt;
 	volatile bool enabled;
 	volatile bool discard_fifo;
@@ -658,6 +659,104 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
 }
 
 #if defined(UARTE_ANY_ASYNC)
+
+/* Function for preparing the flushing of the internal RX FIFO. The actual flushing
+ * is performed only if the UARTE is going to be disabled. For example, when runtime
+ * device PM is used, the UARTE may be kept enabled and flushing is not performed.
+ * The preparation is harmless, so it is always possible to just start another RX.
+ *
+ * However, the UARTE does not update the RXAMOUNT register if the FIFO is empty;
+ * the old value remains. In certain cases this makes it impossible to distinguish
+ * whether the FIFO was empty or not. The function tries to minimize the chance of
+ * error with the following measures:
+ * - RXAMOUNT is read before flushing and compared against the value after flushing;
+ *   if they differ, it indicates that data was flushed
+ * - the flush buffer is dirtied and, if RXAMOUNT did not change, it is checked
+ *   whether it is still dirty; if not, it indicates that data was flushed
+ *
+ * In other cases the function indicates that the FIFO was empty. This means that if
+ * the number of bytes in the FIFO equals the last RX transfer length and the data
+ * equals the dirty marker, it will be discarded.
+ *
+ * @param dev Device.
+ */
+static void rx_flush_prepare(const struct device *dev)
+{
+	/* Flushing the RX FIFO requires a buffer bigger than 4 bytes to empty the FIFO. */
+	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	const struct uarte_nrfx_config *config = dev->config;
+	struct uarte_nrfx_data *data = dev->data;
+
+	if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
+		memset(config->rx_flush_buf, dirty, UARTE_HW_RX_FIFO_SIZE);
+		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
+			sys_cache_data_flush_range(config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+		}
+	}
+
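+	/* Remember RXAMOUNT so that rx_flush() can later detect whether the FIFO
+	 * was empty (the register is not updated when nothing is flushed).
+	 */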
+	data->async->rx.prev_amount = nrf_uarte_rx_amount_get(uarte);
+	nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+	/* Final part of handling RXTO event is in ENDRX interrupt
+	 * handler. ENDRX is generated as a result of FLUSHRX task.
+	 */
+	__ASSERT_NO_MSG(!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX));
+}
+
+/** @brief Actual RX FIFO flushing.
+ *
+ * @param dev Device.
+ *
+ * @return Number of bytes flushed from the FIFO.
+ */
+static uint8_t rx_flush(const struct device *dev)
+{
+	/* Flushing the RX FIFO requires a buffer bigger than 4 bytes to empty the FIFO. */
+	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	const struct uarte_nrfx_config *config = dev->config;
+	struct uarte_nrfx_data *data = dev->data;
+	uint32_t rx_amount;
+
+	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
+	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
+		/* empty */
+	}
+	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
+	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
+
+	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
+		sys_cache_data_invd_range(config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
+	}
+
+	rx_amount = nrf_uarte_rx_amount_get(uarte);
+	if (!IS_ENABLED(RX_FLUSH_WORKAROUND)) {
+		return rx_amount;
+	}
+
+	if (rx_amount != data->async->rx.prev_amount) {
+		return rx_amount;
+	}
+
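+	/* RXAMOUNT larger than the flush buffer must be a stale value from an
+	 * earlier, bigger transfer, so nothing was flushed.
+	 */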
+	if (rx_amount > UARTE_HW_RX_FIFO_SIZE) {
+		return 0;
+	}
+
+	/* If event is set that means it came after ENDRX so there was at least
+	 * one byte in the RX FIFO.
+	 */
+	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
+		return rx_amount;
+	}
+
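+	/* If the flush buffer still contains only the dirty marker, assume that
+	 * the FIFO was empty.
+	 */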
+	for (int i = 0; i < rx_amount; i++) {
+		if (config->rx_flush_buf[i] != dirty) {
+			return rx_amount;
+		}
+	}
+
+	return 0;
+}
 /** @brief Disable UARTE peripheral if not used by RX or TX.
  *
  * It must be called with interrupts locked so that deciding if no direction is
@@ -678,6 +777,9 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, ui
 		return;
 	}
 
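+	/* If a flush was prepared, perform it now, before the peripheral gets
+	 * disabled, and store the number of flushed bytes.
+	 */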
+	if (data->async && data->async->rx.prev_amount) {
+		data->async->rx.flush_cnt = rx_flush(dev);
+	}
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 	const struct uarte_nrfx_config *config = dev->config;
 
@@ -691,9 +793,6 @@ static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, ui
 
 	nrf_uarte_disable(get_uarte_instance(dev));
 }
-#endif
-
-#ifdef UARTE_ANY_ASYNC
 
 static void rx_timeout(struct k_timer *timer);
 static void tx_timeout(struct k_timer *timer);
@@ -1434,93 +1533,6 @@ static void endrx_isr(const struct device *dev)
 #endif
 }
 
-/* Function for flushing internal RX fifo. Function can be called in case
- * flushed data is discarded or when data is valid and needs to be retrieved.
- *
- * However, UARTE does not update RXAMOUNT register if fifo is empty. Old value
- * remains. In certain cases it makes it impossible to distinguish between
- * case when fifo was empty and not. Function is trying to minimize chances of
- * error with following measures:
- * - RXAMOUNT is read before flushing and compared against value after flushing
- *   if they differ it indicates that data was flushed
- * - user buffer is dirtied and if RXAMOUNT did not changed it is checked if
- *   it is still dirty. If not then it indicates that data was flushed
- *
- * In other cases function indicates that fifo was empty. It means that if
- * number of bytes in the fifo equal last rx transfer length and data is equal
- * to dirty marker it will be discarded.
- *
- * @param dev Device.
- * @param buf Buffer for flushed data, null indicates that flushed data can be
- *        dropped but we still want to get amount of data flushed.
- * @param len Buffer size, not used if @p buf is null.
- *
- * @return number of bytes flushed from the fifo.
- */
-
-static uint8_t rx_flush(const struct device *dev, uint8_t *buf)
-{
-	/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo*/
-	static const uint8_t dirty = CONFIG_UART_NRFX_UARTE_RX_FLUSH_MAGIC_BYTE;
-	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
-	const struct uarte_nrfx_config *config = dev->config;
-	uint32_t prev_rx_amount;
-	uint32_t rx_amount;
-
-	if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
-		memset(buf, dirty, UARTE_HW_RX_FIFO_SIZE);
-		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
-			sys_cache_data_flush_range(buf, UARTE_HW_RX_FIFO_SIZE);
-		}
-		prev_rx_amount = nrf_uarte_rx_amount_get(uarte);
-	} else {
-		prev_rx_amount = 0;
-	}
-
-	nrf_uarte_rx_buffer_set(uarte, buf, UARTE_HW_RX_FIFO_SIZE);
-	/* Final part of handling RXTO event is in ENDRX interrupt
-	 * handler. ENDRX is generated as a result of FLUSHRX task.
-	 */
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
-	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
-	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
-		/* empty */
-	}
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
-	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
-
-	rx_amount = nrf_uarte_rx_amount_get(uarte);
-	if (!buf || !IS_ENABLED(RX_FLUSH_WORKAROUND)) {
-		return rx_amount;
-	}
-
-	if (rx_amount != prev_rx_amount) {
-		return rx_amount;
-	}
-
-	if (rx_amount > UARTE_HW_RX_FIFO_SIZE) {
-		return 0;
-	}
-
-	/* If event is set that means it came after ENDRX so there was at least
-	 * one byte in the RX FIFO.
-	 */
-	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
-		return rx_amount;
-	}
-
-	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
-		sys_cache_data_invd_range(buf, UARTE_HW_RX_FIFO_SIZE);
-	}
-
-	for (int i = 0; i < rx_amount; i++) {
-		if (buf[i] != dirty) {
-			return rx_amount;
-		}
-	}
-
-	return 0;
-}
 
 /* This handler is called when the receiver is stopped. If rx was aborted
  * data from fifo is flushed.
@@ -1553,17 +1565,16 @@ static void rxto_isr(const struct device *dev)
 		async_rx->discard_fifo = false;
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 		if (HW_RX_COUNTING_ENABLED(config)) {
-			uint8_t buf[UARTE_HW_RX_FIFO_SIZE];
-
 			/* It needs to be included because TIMER+PPI got RXDRDY events
 			 * and counted those flushed bytes.
 			 */
-			async_rx->total_user_byte_cnt += rx_flush(dev, buf);
-			(void)buf;
+			rx_flush_prepare(dev);
+			async_rx->total_user_byte_cnt += rx_flush(dev);
 		}
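+		/* FIFO content is being discarded, so make sure that no deferred
+		 * flush is performed at disable/suspend.
+		 */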
+		async_rx->prev_amount = 0;
 #endif
 	} else {
-		async_rx->flush_cnt = rx_flush(dev, config->rx_flush_buf);
+		rx_flush_prepare(dev);
 	}
 
 #ifdef UARTE_HAS_FRAME_TIMEOUT
@@ -2175,6 +2186,9 @@ static void uarte_pm_suspend(const struct device *dev)
 	(void)data;
 #ifdef UARTE_ANY_ASYNC
 	if (data->async) {
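+		/* Complete a prepared flush before the peripheral is suspended;
+		 * the number of flushed bytes is kept in flush_cnt.
+		 */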
+		if (data->async->rx.prev_amount) {
+			data->async->rx.flush_cnt = rx_flush(dev);
+		}
 		/* Entering inactive state requires that the device has no
 		 * active asynchronous calls.
 		 */