@@ -99,6 +99,10 @@ uint32_t lpuartdiv_calc(const uint64_t clock_rate, const uint32_t baud_rate)
 #endif /* USART_PRESC_PRESCALER */
 #endif /* HAS_LPUART */
 
+#ifdef CONFIG_UART_ASYNC_API
+#define STM32_ASYNC_STATUS_TIMEOUT (DMA_STATUS_BLOCK + 1)
+#endif
+
 #ifdef CONFIG_PM
 static void uart_stm32_pm_policy_state_lock_get(const struct device *dev)
 {
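The sentinel is deliberately one past `DMA_STATUS_BLOCK` so it can never collide with a code the DMA driver passes to a transfer callback. A minimal sketch of that relationship, assuming the usual definitions in `<zephyr/drivers/dma.h>` (`DMA_STATUS_COMPLETE` for a finished transfer, `DMA_STATUS_BLOCK` for a finished block, negative values for errors); `status_is_from_dma()` is an invented helper:

```c
#include <zephyr/drivers/dma.h>

#define STM32_ASYNC_STATUS_TIMEOUT (DMA_STATUS_BLOCK + 1)

/* Sketch only: the driver-private timeout marker merely has to be
 * distinct from everything dma_stm32 can hand to the RX callback. */
static inline bool status_is_from_dma(int status)
{
	return status == DMA_STATUS_COMPLETE ||
	       status == DMA_STATUS_BLOCK ||
	       status < 0;	/* DMA error codes are negative */
}
```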
@@ -1117,11 +1121,16 @@ static inline void async_evt_rx_rdy(struct uart_stm32_data *data)
 		.data.rx.offset = data->dma_rx.offset
 	};
 
-	/* update the current pos for new data */
-	data->dma_rx.offset = data->dma_rx.counter;
+	/* When cyclic DMA is used, buffer positions are not updated here - call the callback every time */
+	if (data->dma_rx.dma_cfg.cyclic == 0) {
+		/* update the current pos for new data */
+		data->dma_rx.offset = data->dma_rx.counter;
 
-	/* send event only for new data */
-	if (event.data.rx.len > 0) {
+		/* send event only for new data */
+		if (event.data.rx.len > 0) {
+			async_user_callback(data, &event);
+		}
+	} else {
 		async_user_callback(data, &event);
 	}
 }
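Spelled out: `event.data.rx.len` is derived from `counter - offset` in the unchanged lines just above this hunk, so the branch boils down to a single predicate. A hedged restatement (helper name invented, not driver code):

```c
/* In normal mode an event is only worth sending when new bytes arrived;
 * in cyclic mode uart_stm32_dma_rx_flush() owns offset/counter and the
 * callback is invoked unconditionally. */
static inline bool emit_rx_rdy(bool cyclic, size_t len)
{
	return cyclic || (len > 0);
}
```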
@@ -1204,20 +1213,45 @@ static inline void async_timer_start(struct k_work_delayable *work,
 	}
 }
 
-static void uart_stm32_dma_rx_flush(const struct device *dev)
+static void uart_stm32_dma_rx_flush(const struct device *dev, int status)
 {
 	struct dma_status stat;
 	struct uart_stm32_data *data = dev->data;
 
-	if (dma_get_status(data->dma_rx.dma_dev,
-			   data->dma_rx.dma_channel, &stat) == 0) {
-		size_t rx_rcv_len = data->dma_rx.buffer_length -
-					stat.pending_length;
-		if (rx_rcv_len > data->dma_rx.offset) {
-			data->dma_rx.counter = rx_rcv_len;
+	size_t rx_rcv_len = 0;
 
-			async_evt_rx_rdy(data);
+	switch (status) {
+	case DMA_STATUS_COMPLETE:
+		/* fully complete */
+		data->dma_rx.counter = data->dma_rx.buffer_length;
+		break;
+	case DMA_STATUS_BLOCK:
+		/* half complete */
+		data->dma_rx.counter = data->dma_rx.buffer_length / 2;
+
+		break;
+	default: /* likely STM32_ASYNC_STATUS_TIMEOUT */
+		if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
+			rx_rcv_len = data->dma_rx.buffer_length - stat.pending_length;
+			data->dma_rx.counter = rx_rcv_len;
 		}
+		break;
+	}
+
+	async_evt_rx_rdy(data);
+
+	switch (status) { /* update the offset */
+	case DMA_STATUS_COMPLETE:
+		/* fully complete */
+		data->dma_rx.offset = 0;
+		break;
+	case DMA_STATUS_BLOCK:
+		/* half complete */
+		data->dma_rx.offset = data->dma_rx.buffer_length / 2;
+		break;
+	default: /* likely STM32_ASYNC_STATUS_TIMEOUT */
+		data->dma_rx.offset = rx_rcv_len;
+		break;
 	}
 }
 
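The ordering inside the new flush is the point: `counter` is set first, the event (which covers `[offset, counter)`) is emitted, and only then does `offset` advance. A standalone walk-through of one revolution of a circular buffer under those rules; the 64-byte size, helper names, and byte counts are all invented:

```c
#include <stddef.h>
#include <stdio.h>

#define BUF_LEN 64
#define STATUS_COMPLETE 0	/* assumed DMA_STATUS_COMPLETE value */
#define STATUS_BLOCK    1	/* assumed DMA_STATUS_BLOCK value */
#define STATUS_TIMEOUT  2	/* mirrors STM32_ASYNC_STATUS_TIMEOUT */

static size_t counter, offset;

static void flush(int status, size_t dma_received)
{
	if (status == STATUS_COMPLETE) {
		counter = BUF_LEN;		/* fully complete */
	} else if (status == STATUS_BLOCK) {
		counter = BUF_LEN / 2;		/* half complete */
	} else {
		counter = dma_received;		/* from dma_get_status() */
	}
	printf("rx_rdy: %zu bytes at offset %zu\n", counter - offset, offset);
	if (status == STATUS_COMPLETE) {
		offset = 0;			/* wrap with the hardware */
	} else if (status == STATUS_BLOCK) {
		offset = BUF_LEN / 2;
	} else {
		offset = dma_received;
	}
}

int main(void)
{
	flush(STATUS_TIMEOUT, 10);	/* idle line:  10 bytes at 0  */
	flush(STATUS_BLOCK, 0);		/* half mark:  22 bytes at 10 */
	flush(STATUS_COMPLETE, 0);	/* wrap point: 32 bytes at 32 */
	return 0;
}
```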
@@ -1269,7 +1303,7 @@ static void uart_stm32_isr(const struct device *dev)
 		LOG_DBG("idle interrupt occurred");
 
 		if (data->dma_rx.timeout == 0) {
-			uart_stm32_dma_rx_flush(dev);
+			uart_stm32_dma_rx_flush(dev, STM32_ASYNC_STATUS_TIMEOUT);
 		} else {
 			/* Start the RX timer not null */
 			async_timer_start(&data->dma_rx.timeout_work,
@@ -1417,7 +1451,7 @@ static int uart_stm32_async_rx_disable(const struct device *dev)
 
 	LL_USART_DisableIT_IDLE(usart);
 
-	uart_stm32_dma_rx_flush(dev);
+	uart_stm32_dma_rx_flush(dev, STM32_ASYNC_STATUS_TIMEOUT);
 
 	async_evt_rx_buf_release(data);
 
@@ -1517,27 +1551,32 @@ void uart_stm32_dma_rx_cb(const struct device *dma_dev, void *user_data,
 
 	(void)k_work_cancel_delayable(&data->dma_rx.timeout_work);
 
-	/* true since this functions occurs when buffer if full */
-	data->dma_rx.counter = data->dma_rx.buffer_length;
+	/* If we are in NORMAL MODE */
+	if (data->dma_rx.dma_cfg.cyclic == 0) {
 
-	async_evt_rx_rdy(data);
-
-	if (data->rx_next_buffer != NULL) {
-		async_evt_rx_buf_release(data);
+		/* true since this function is called when the buffer is full */
+		data->dma_rx.counter = data->dma_rx.buffer_length;
+		async_evt_rx_rdy(data);
+		if (data->rx_next_buffer != NULL) {
+			async_evt_rx_buf_release(data);
 
-		/* replace the buffer when the current
-		 * is full and not the same as the next
-		 * one.
-		 */
-		uart_stm32_dma_replace_buffer(uart_dev);
+			/* replace the buffer when the current
+			 * is full and not the same as the next
+			 * one.
+			 */
+			uart_stm32_dma_replace_buffer(uart_dev);
+		} else {
+			/* Buffer full without a valid next buffer:
+			 * an UART_RX_DISABLED event must be generated,
+			 * but uart_stm32_async_rx_disable() cannot be
+			 * called in ISR context. So force the RX timeout
+			 * to its minimum value and let it do the job.
+			 */
+			k_work_reschedule(&data->dma_rx.timeout_work, K_TICKS(1));
+		}
 	} else {
-		/* Buffer full without valid next buffer,
-		 * an UART_RX_DISABLED event must be generated,
-		 * but uart_stm32_async_rx_disable() cannot be
-		 * called in ISR context. So force the RX timeout
-		 * to minimum value and let the RX timeout to do the job.
-		 */
-		k_work_reschedule(&data->dma_rx.timeout_work, K_TICKS(1));
+		/* CIRCULAR MODE */
+		uart_stm32_dma_rx_flush(data->uart_dev, status);
 	}
 }
 
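In circular mode the DMA driver now invokes this callback on both the half-transfer and transfer-complete interrupts, and the buffer is never swapped, so an application can keep one persistent buffer for the whole session. A minimal usage sketch against the standard async UART API; the buffer size and timeout are invented values:

```c
#include <zephyr/drivers/uart.h>

static uint8_t rx_buf[64];	/* never replaced while cyclic DMA runs */

static void uart_cb(const struct device *dev, struct uart_event *evt,
		    void *user_data)
{
	if (evt->type == UART_RX_RDY) {
		/* New bytes are at evt->data.rx.buf + evt->data.rx.offset,
		 * evt->data.rx.len long; consume them before the DMA wraps
		 * around and overwrites this region. */
	}
}

static int start_rx(const struct device *uart)
{
	int err = uart_callback_set(uart, uart_cb, NULL);

	if (err) {
		return err;
	}
	/* 1000 us of idle line drives the STM32_ASYNC_STATUS_TIMEOUT flush */
	return uart_rx_enable(uart, rx_buf, sizeof(rx_buf), 1000);
}
```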
@@ -1722,7 +1761,7 @@ static void uart_stm32_async_rx_timeout(struct k_work *work)
 	if (data->dma_rx.counter == data->dma_rx.buffer_length) {
 		uart_stm32_async_rx_disable(dev);
 	} else {
-		uart_stm32_dma_rx_flush(dev);
+		uart_stm32_dma_rx_flush(dev, STM32_ASYNC_STATUS_TIMEOUT);
 	}
 }
 
@@ -1829,9 +1868,10 @@ static int uart_stm32_async_init(const struct device *dev)
 		data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
 	}
 
-	/* RX disable circular buffer */
-	data->dma_rx.blk_cfg.source_reload_en = 0;
-	data->dma_rx.blk_cfg.dest_reload_en = 0;
+	/* Enable/disable RX circular buffer */
+	data->dma_rx.blk_cfg.source_reload_en = data->dma_rx.dma_cfg.cyclic;
+	data->dma_rx.blk_cfg.dest_reload_en = data->dma_rx.dma_cfg.cyclic;
+
 	data->dma_rx.blk_cfg.fifo_mode_control = data->dma_rx.fifo_threshold;
 
 	data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg;
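`source_reload_en` and `dest_reload_en` are the generic `struct dma_block_config` knobs that dma_stm32 maps onto the hardware's circular mode, so wiring both to `dma_cfg.cyclic` keeps the devicetree flag as the single source of truth. A self-contained sketch of the same pattern, assuming Zephyr's generic DMA API; buffer, register address, and sizes are invented:

```c
#include <zephyr/drivers/dma.h>

static uint8_t rx_buf[64];

static void setup_cyclic_rx_block(struct dma_block_config *blk,
				  struct dma_config *cfg,
				  uint32_t periph_reg /* e.g. USART RDR */)
{
	blk->source_address = periph_reg;
	blk->dest_address = (uint32_t)(uintptr_t)rx_buf;
	blk->block_size = sizeof(rx_buf);
	blk->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	blk->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	/* both reload bits track the single devicetree-provided flag */
	blk->source_reload_en = cfg->cyclic;
	blk->dest_reload_en = cfg->cyclic;
	cfg->head_block = blk;
}
```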
@@ -1868,6 +1908,10 @@ static int uart_stm32_async_init(const struct device *dev)
 		data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
 	}
 
+	/* Enable/disable TX circular buffer */
+	data->dma_tx.blk_cfg.source_reload_en = data->dma_tx.dma_cfg.cyclic;
+	data->dma_tx.blk_cfg.dest_reload_en = data->dma_tx.dma_cfg.cyclic;
+
 	data->dma_tx.blk_cfg.fifo_mode_control = data->dma_tx.fifo_threshold;
 
 	data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg;
@@ -2225,6 +2269,8 @@ static int uart_stm32_pm_action(const struct device *dev,
 	.dma_slot = STM32_DMA_SLOT(index, dir, slot),			\
 	.channel_direction = STM32_DMA_CONFIG_DIRECTION(		\
 				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
+	.cyclic = STM32_DMA_CONFIG_CYCLIC(				\
+				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
 	.channel_priority = STM32_DMA_CONFIG_PRIORITY(			\
 				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
 	.source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE(	\
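After expansion, the cyclic bit decoded from the devicetree channel-config cell rides into the generic `struct dma_config` exactly like direction and priority. An illustrative snapshot of the resulting initializer; every value below is invented, in the driver they come from the `STM32_DMA_CONFIG_*` decoders:

```c
#include <zephyr/drivers/dma.h>

static struct dma_config example_rx_dma_cfg = {
	.dma_slot = 4,				/* request line, invented */
	.channel_direction = PERIPHERAL_TO_MEMORY,
	.cyclic = 1,				/* STM32_DMA_CONFIG_CYCLIC() */
	.channel_priority = 1,
	.source_data_size = 1,			/* bytes */
	.dest_data_size = 1,
};
```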