 LOG_MODULE_REGISTER(can_mcan, CONFIG_CAN_LOG_LEVEL);
 
 #define CAN_INIT_TIMEOUT_MS 100
+#define TX_ABORTED_TIMEOUT_MS 100
+#define TXBCF_TIMER_TIMEOUT K_MSEC(CONFIG_CAN_MCAN_TXBCF_POLL_INTERVAL_MS)
 
 int can_mcan_read_reg(const struct device *dev, uint16_t reg, uint32_t *val)
 {
@@ -276,7 +278,7 @@ int can_mcan_get_capabilities(const struct device *dev, can_mode_t *cap)
 {
	ARG_UNUSED(dev);
 
-	*cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY;
+	*cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_ONE_SHOT;
 
	if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) {
		*cap |= CAN_MODE_MANUAL_RECOVERY;
@@ -294,6 +296,7 @@ int can_mcan_start(const struct device *dev)
	const struct can_mcan_config *config = dev->config;
	struct can_mcan_data *data = dev->data;
	int err = 0;
+	uint32_t cccr;
 
	if (data->common.started) {
		return -EALREADY;
@@ -322,12 +325,80 @@ int can_mcan_start(const struct device *dev)
		return err;
	}
 
+	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
+	if (err != 0) {
+		return err;
+	}
+
+	if (cccr & CAN_MCAN_CCCR_DAR) {
+		/*
+		 * When DAR (Disable Automatic Retransmission), used for CAN_MODE_ONE_SHOT,
+		 * is enabled and a transmission fails, a bug in the MCAN IP prevents the
+		 * TCF (Transmission Cancellation Finished) interrupt from triggering,
+		 * even though the corresponding bit is set in the TXBCF register. It is
+		 * therefore necessary to poll the TXBCF register to detect failed
+		 * transmissions while DAR is enabled.
+		 */
+		k_timer_start(&data->txbcf_timer, TXBCF_TIMER_TIMEOUT, TXBCF_TIMER_TIMEOUT);
+	}
+
	data->common.started = true;
	pm_device_busy_set(dev);
 
	return err;
 }
 
+static int can_mcan_read_txbcf(const struct device *dev)
+{
+	const struct can_mcan_config *config = dev->config;
+	const struct can_mcan_callbacks *cbs = config->callbacks;
+	struct can_mcan_data *data = dev->data;
+	uint32_t txbcfs;
+	int err;
+	can_tx_callback_t tx_cb;
+	void *user_data;
+
+	err = can_mcan_read_reg(dev, CAN_MCAN_TXBCF, &txbcfs);
+	if (err != 0) {
+		LOG_ERR("failed to read tx cancellation finished (err %d)", err);
+		return err;
+	}
+
+	if (txbcfs == 0) {
+		return 0;
+	}
+
+	for (size_t tx_idx = 0; tx_idx < cbs->num_tx; tx_idx++) {
+		if ((txbcfs & BIT(tx_idx)) == 0) {
+			continue;
+		}
+
+		if (cbs->tx[tx_idx].function == NULL) {
+			continue;
+		}
+
+		tx_cb = cbs->tx[tx_idx].function;
+		user_data = cbs->tx[tx_idx].user_data;
+		cbs->tx[tx_idx].function = NULL;
+		LOG_DBG("tx buffer cancellation finished (idx %u)", tx_idx);
+		k_sem_give(&data->tx_sem);
+		tx_cb(dev, -ENETDOWN, user_data);
+	}
+
+	if (k_sem_count_get(&data->tx_sem) == cbs->num_tx) {
+		k_sem_give(&data->txbcr_sem);
+	}
+
+	return 0;
+}
+
+static void can_mcan_txbcf_timer_handler(struct k_timer *timer_id)
+{
+	const struct device *dev = k_timer_user_data_get(timer_id);
+
+	can_mcan_read_txbcf(dev);
+}
+
 static bool can_mcan_rx_filters_exist(const struct device *dev)
 {
	const struct can_mcan_config *config = dev->config;
@@ -352,17 +423,30 @@ static bool can_mcan_rx_filters_exist(const struct device *dev)
 int can_mcan_stop(const struct device *dev)
 {
	const struct can_mcan_config *config = dev->config;
-	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_data *data = dev->data;
-	can_tx_callback_t tx_cb;
-	uint32_t tx_idx;
	int err;
 
	if (!data->common.started) {
		return -EALREADY;
	}
 
-	/* CAN transmissions are automatically stopped when entering init mode */
+
+	/* Request all TX buffers to be cancelled */
+	k_sem_reset(&data->txbcr_sem);
+	err = can_mcan_write_reg(dev, CAN_MCAN_TXBCR, CAN_MCAN_TXBCR_CR);
+	if (err != 0) {
+		return err;
+	}
+
+	/* Wait for all TX buffers to be cancelled */
+	err = k_sem_take(&data->txbcr_sem, K_MSEC(TX_ABORTED_TIMEOUT_MS));
+	if (err != 0) {
+		LOG_ERR("Timed out waiting for all TX buffers to be cancelled");
+		return err;
+	}
+
+	k_timer_stop(&data->txbcf_timer);
+
	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
	if (err != 0) {
		LOG_ERR("Failed to enter init mode");
@@ -381,16 +465,6 @@ int can_mcan_stop(const struct device *dev)
 
	data->common.started = false;
 
-	for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) {
-		tx_cb = cbs->tx[tx_idx].function;
-
-		if (tx_cb != NULL) {
-			cbs->tx[tx_idx].function = NULL;
-			tx_cb(dev, -ENETDOWN, cbs->tx[tx_idx].user_data);
-			k_sem_give(&data->tx_sem);
-		}
-	}
-
	k_mutex_lock(&data->lock, K_FOREVER);
	if (!can_mcan_rx_filters_exist(dev)) {
		pm_device_busy_clear(dev);
@@ -402,7 +476,7 @@ int can_mcan_stop(const struct device *dev)
 
 int can_mcan_set_mode(const struct device *dev, can_mode_t mode)
 {
-	can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY;
+	can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_ONE_SHOT;
	struct can_mcan_data *data = dev->data;
	uint32_t cccr;
	uint32_t test;
@@ -460,6 +534,13 @@ int can_mcan_set_mode(const struct device *dev, can_mode_t mode)
	}
 #endif /* CONFIG_CAN_FD_MODE */
 
+	if ((mode & CAN_MODE_ONE_SHOT) != 0) {
+		/* Disable Automatic Retransmission */
+		cccr |= CAN_MCAN_CCCR_DAR;
+	} else {
+		cccr &= ~CAN_MCAN_CCCR_DAR;
+	}
+
	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
	if (err != 0) {
		goto unlock;
@@ -480,12 +561,9 @@ int can_mcan_set_mode(const struct device *dev, can_mode_t mode)
 
 static void can_mcan_state_change_handler(const struct device *dev)
 {
-	const struct can_mcan_config *config = dev->config;
	struct can_mcan_data *data = dev->data;
	const can_state_change_callback_t state_cb = data->common.state_change_cb;
	void *state_cb_data = data->common.state_change_cb_user_data;
-	const struct can_mcan_callbacks *cbs = config->callbacks;
-	can_tx_callback_t tx_cb;
	struct can_bus_err_cnt err_cnt;
	enum can_state state;
	uint32_t cccr;
@@ -507,17 +585,6 @@ static void can_mcan_state_change_handler(const struct device *dev)
		return;
	}
 
-	/* Call all TX queue callbacks with -ENETUNREACH */
-	for (uint32_t tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) {
-		tx_cb = cbs->tx[tx_idx].function;
-
-		if (tx_cb != NULL) {
-			cbs->tx[tx_idx].function = NULL;
-			tx_cb(dev, -ENETUNREACH, cbs->tx[tx_idx].user_data);
-			k_sem_give(&data->tx_sem);
-		}
-	}
-
	if (!IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE) ||
	    (data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) {
		/*
@@ -653,7 +720,8 @@ void can_mcan_line_0_isr(const struct device *dev)
 {
	const uint32_t events = CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW |
				CAN_MCAN_IR_TEFN | CAN_MCAN_IR_TEFL | CAN_MCAN_IR_ARA |
-				CAN_MCAN_IR_MRAF | CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED;
+				CAN_MCAN_IR_MRAF | CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED |
+				CAN_MCAN_IR_TCF;
	struct can_mcan_data *data = dev->data;
	uint32_t ir;
	int err;
@@ -694,11 +762,16 @@ void can_mcan_line_0_isr(const struct device *dev)
 #ifdef CONFIG_CAN_STATS
	if ((ir & (CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED)) != 0U) {
		uint32_t reg;
+
		/* This function automatically updates protocol error stats */
		can_mcan_read_psr(dev, &reg);
	}
 #endif
 
+	if ((ir & CAN_MCAN_IR_TCF) != 0U) {
+		can_mcan_read_txbcf(dev);
+	}
+
	err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
	if (err != 0) {
		return;
@@ -1010,6 +1083,18 @@ int can_mcan_send(const struct device *dev, const struct can_frame *frame, k_tim
		return -ENETUNREACH;
	}
 
+	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &reg);
+	if (err != 0) {
+		return err;
+	}
+
+	if (reg & CAN_MCAN_CCCR_DAR) {
+		err = can_mcan_read_txbcf(dev);
+		if (err != 0) {
+			return err;
+		}
+	}
+
	err = k_sem_take(&data->tx_sem, timeout);
	if (err != 0) {
		return -EAGAIN;
@@ -1424,6 +1509,9 @@ int can_mcan_init(const struct device *dev)
	k_mutex_init(&data->lock);
	k_mutex_init(&data->tx_mtx);
	k_sem_init(&data->tx_sem, cbs->num_tx, cbs->num_tx);
+	k_sem_init(&data->txbcr_sem, 0, 1);
+	k_timer_init(&data->txbcf_timer, can_mcan_txbcf_timer_handler, NULL);
+	k_timer_user_data_set(&data->txbcf_timer, (void *)dev);
 
	if (config->common.phy != NULL && !device_is_ready(config->common.phy)) {
		LOG_ERR("CAN transceiver not ready");
@@ -1533,7 +1621,7 @@ int can_mcan_init(const struct device *dev)
 
	reg = CAN_MCAN_IE_BOE | CAN_MCAN_IE_EWE | CAN_MCAN_IE_EPE | CAN_MCAN_IE_MRAFE |
	      CAN_MCAN_IE_TEFLE | CAN_MCAN_IE_TEFNE | CAN_MCAN_IE_RF0NE | CAN_MCAN_IE_RF1NE |
-	      CAN_MCAN_IE_RF0LE | CAN_MCAN_IE_RF1LE;
+	      CAN_MCAN_IE_RF0LE | CAN_MCAN_IE_RF1LE | CAN_MCAN_IE_TCFE;
 #ifdef CONFIG_CAN_STATS
	/* These ISRs are only enabled/used for statistics, they are otherwise
	 * disabled as they may produce a significant amount of frequent ISRs.
@@ -1565,5 +1653,14 @@ int can_mcan_init(const struct device *dev)
		return err;
	}
 
+	/*
+	 * Interrupt on every TX buffer cancellation finished event.
+	 */
+	reg = CAN_MCAN_TXBCIE_CFIE;
+	err = can_mcan_write_reg(dev, CAN_MCAN_TXBCIE, reg);
+	if (err != 0) {
+		return err;
+	}
+
	return can_mcan_clear_mram(dev, 0, config->mram_size);
 }
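
For reference, a minimal application-side sketch of how one-shot mode would be exercised on top of this change. It is illustrative only and not part of the patch: the `zephyr,canbus` chosen node, frame ID, and callback name are assumptions, and error handling is reduced to early returns. Per this change, a one-shot frame whose transmission is cancelled completes its TX callback with `-ENETDOWN`.

```c
#include <errno.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Hypothetical TX completion callback: error is 0 on success, or -ENETDOWN
 * when the one-shot transmission was cancelled after a failed attempt.
 */
static void one_shot_tx_cb(const struct device *dev, int error, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);

	printk("one-shot TX result: %d\n", error);
}

int main(void)
{
	/* Assumes the board provides a zephyr,canbus chosen node */
	const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	struct can_frame frame = {
		.id = 0x123,
		.dlc = 1,
		.data = {0x42},
	};
	int err;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	/* One-shot mode must be selected while the controller is stopped */
	err = can_set_mode(can_dev, CAN_MODE_ONE_SHOT);
	if (err != 0) {
		return err;
	}

	/* Starting the controller arms the TXBCF polling timer when DAR is set */
	err = can_start(can_dev);
	if (err != 0) {
		return err;
	}

	/* Queue a single frame; it is not retransmitted on arbitration loss or error */
	return can_send(can_dev, &frame, K_MSEC(100), one_shot_tx_cb, NULL);
}
```

Note that `can_set_mode()` has to run before `can_start()`: the driver only arms the TXBCF polling timer in `can_mcan_start()` when it finds CCCR.DAR already set, which is how CAN_MODE_ONE_SHOT is mapped onto the MCAN core in this patch.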