88#include <zephyr/kernel.h>
99#include <zephyr/drivers/uart.h>
1010#include <zephyr/cache.h>
11+ #include <zephyr/shell/shell.h>
1112#include <zephyr/logging/log.h>
1213#include <zephyr/logging/log_output.h>
1314#include <zephyr/logging/log_frontend_stmesp.h>
1617#include <zephyr/debug/mipi_stp_decoder.h>
1718#include <zephyr/linker/devicetree_regions.h>
1819#include <zephyr/drivers/misc/coresight/nrf_etr.h>
20+ #include <zephyr/drivers/serial/uart_async_rx.h>
1921#include <zephyr/sys/printk.h>
2022#include <dmm.h>
2123#include <nrfx_tbm.h>
@@ -131,6 +133,20 @@ static const char *const hw_evts[] = {
131133 "GD0 HS down" , /* 31 Global domain high speed 0 down */
132134};
133135
#ifdef CONFIG_NRF_ETR_SHELL
/* Total size of the pool backing the asynchronous UART RX helper:
 * per-buffer size times buffer count (both from Kconfig).
 */
#define RX_BUF_SIZE \
	(CONFIG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_SIZE * CONFIG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_COUNT)

static void etr_timer_handler(struct k_timer *timer);
/* Backoff timer used to poll for pending ETR data (see etr_timer_handler). */
K_TIMER_DEFINE(etr_timer, etr_timer_handler, NULL);
/* RX pool placed in the DMA-capable memory section associated with the UART node. */
static uint8_t rx_buf[RX_BUF_SIZE] DMM_MEMORY_SECTION(UART_NODE);
/* Helper state for splitting rx_buf into driver-owned RX buffers. */
static struct uart_async_rx async_rx;
/* Count of UART buffer requests deferred because no buffer was free. */
static atomic_t pending_rx_req;
static const struct shell etr_shell;
/* Event callback and context registered by the shell core at transport init. */
static shell_transport_handler_t shell_handler;
static void *shell_context;
#endif
149+
134150static int log_output_func (uint8_t * buf , size_t size , void * ctx )
135151{
136152 if (use_async_uart ) {
@@ -610,6 +626,7 @@ void nrf_etr_flush(void)
610626 irq_unlock (k );
611627}
612628
629+ #ifndef CONFIG_NRF_ETR_SHELL
613630static void etr_thread_func (void * dummy1 , void * dummy2 , void * dummy3 )
614631{
615632 uint64_t checkpoint = 0 ;
@@ -642,6 +659,7 @@ static void etr_thread_func(void *dummy1, void *dummy2, void *dummy3)
642659 k_sleep (K_MSEC (CONFIG_NRF_ETR_BACKOFF ));
643660 }
644661}
662+ #endif
645663
646664static void uart_event_handler (const struct device * dev , struct uart_event * evt , void * user_data )
647665{
@@ -653,6 +671,33 @@ static void uart_event_handler(const struct device *dev, struct uart_event *evt,
653671 case UART_TX_DONE :
654672 k_sem_give (& uart_sem );
655673 break ;
674+ #ifdef CONFIG_NRF_ETR_SHELL
675+ case UART_RX_RDY :
676+ uart_async_rx_on_rdy (& async_rx , evt -> data .rx .buf , evt -> data .rx .len );
677+ shell_handler (SHELL_TRANSPORT_EVT_RX_RDY , shell_context );
678+ break ;
679+ case UART_RX_BUF_REQUEST : {
680+ uint8_t * buf = uart_async_rx_buf_req (& async_rx );
681+ size_t len = uart_async_rx_get_buf_len (& async_rx );
682+
683+ if (buf ) {
684+ int err = uart_rx_buf_rsp (dev , buf , len );
685+
686+ if (err < 0 ) {
687+ uart_async_rx_on_buf_rel (& async_rx , buf );
688+ }
689+ } else {
690+ atomic_inc (& pending_rx_req );
691+ }
692+
693+ break ;
694+ }
695+ case UART_RX_BUF_RELEASED :
696+ uart_async_rx_on_buf_rel (& async_rx , evt -> data .rx_buf .buf );
697+ break ;
698+ case UART_RX_DISABLED :
699+ break ;
700+ #endif /* CONFIG_NRF_ETR_SHELL */
656701 default :
657702 __ASSERT_NO_MSG (0 );
658703 }
@@ -666,7 +711,11 @@ static void tbm_event_handler(nrf_tbm_event_t event)
666711 tbm_full = true;
667712 }
668713
714+ #ifdef CONFIG_NRF_ETR_SHELL
715+ k_poll_signal_raise (& etr_shell .ctx -> signals [SHELL_SIGNAL_LOG_MSG ], 0 );
716+ #else
669717 k_wakeup (& etr_thread );
718+ #endif
670719}
671720
672721int etr_process_init (void )
@@ -686,12 +735,178 @@ int etr_process_init(void)
686735 nrfx_isr , nrfx_tbm_irq_handler , 0 );
687736 irq_enable (DT_IRQN (DT_NODELABEL (tbm )));
688737
689- k_thread_create (& etr_thread , etr_stack , K_KERNEL_STACK_SIZEOF (etr_stack ),
690- etr_thread_func , NULL , NULL , NULL , K_LOWEST_APPLICATION_THREAD_PRIO , 0 ,
691- K_NO_WAIT );
738+ #ifdef CONFIG_NRF_ETR_SHELL
739+ uint32_t level = CONFIG_LOG_MAX_LEVEL ;
740+ static const struct shell_backend_config_flags cfg_flags =
741+ SHELL_DEFAULT_BACKEND_CONFIG_FLAGS ;
742+
743+ shell_init (& etr_shell , NULL , cfg_flags , true, level );
744+ k_timer_start (& etr_timer , K_MSEC (CONFIG_NRF_ETR_BACKOFF ), K_NO_WAIT );
745+ if (IS_ENABLED (CONFIG_NRF_ETR_DECODE ) || IS_ENABLED (CONFIG_NRF_ETR_DEBUG )) {
746+ err = decoder_init ();
747+ if (err < 0 ) {
748+ return err ;
749+ }
750+ }
751+ #else
752+ k_thread_create (& etr_thread , etr_stack , K_KERNEL_STACK_SIZEOF (etr_stack ), etr_thread_func ,
753+ NULL , NULL , NULL , K_LOWEST_APPLICATION_THREAD_PRIO , 0 , K_NO_WAIT );
692754 k_thread_name_set (& etr_thread , "etr_process" );
755+ #endif
693756
694757 return 0 ;
695758}
696759
697760SYS_INIT (etr_process_init , POST_KERNEL , CONFIG_KERNEL_INIT_PRIORITY_DEFAULT );
761+
762+ #ifdef CONFIG_NRF_ETR_SHELL
763+
764+ static void etr_timer_handler (struct k_timer * timer )
765+ {
766+ if (pending_data () >= MIN_DATA ) {
767+ k_poll_signal_raise (& etr_shell .ctx -> signals [SHELL_SIGNAL_LOG_MSG ], 0 );
768+ } else {
769+ k_timer_start (timer , K_MSEC (CONFIG_NRF_ETR_BACKOFF ), K_NO_WAIT );
770+ }
771+ }
772+
773+ bool z_shell_log_backend_process (const struct shell_log_backend * backend )
774+ {
775+ ARG_UNUSED (backend );
776+
777+ process ();
778+ k_timer_start (& etr_timer , K_MSEC (CONFIG_NRF_ETR_BACKOFF ), K_NO_WAIT );
779+
780+ return false;
781+ }
782+
/* Log-backend disable hook - nothing to do for the ETR transport. */
void z_shell_log_backend_disable(const struct shell_log_backend *backend)
{
	(void)backend;
}
787+
788+ void z_shell_log_backend_enable (const struct shell_log_backend * backend , void * ctx ,
789+ uint32_t init_log_level )
790+ {
791+ ARG_UNUSED (backend );
792+ ARG_UNUSED (ctx );
793+ ARG_UNUSED (init_log_level );
794+ }
795+
796+ static int etr_shell_write (const struct shell_transport * transport , const void * data , size_t length ,
797+ size_t * cnt )
798+ {
799+ size_t len = length ;
800+ uint8_t * buf = (uint8_t * )data ;
801+ size_t chunk_len ;
802+
803+ do {
804+ chunk_len = MIN (len , sizeof (log_output_buf ));
805+ len -= log_output_func (buf , chunk_len , NULL );
806+ buf += chunk_len ;
807+ } while (len > 0 );
808+
809+ * cnt = length ;
810+ shell_handler (SHELL_TRANSPORT_EVT_TX_RDY , shell_context );
811+
812+ return 0 ;
813+ }
814+
815+ static int rx_enable (uint8_t * buf , size_t len )
816+ {
817+ return uart_rx_enable (uart_dev , buf , len , 10000 );
818+ }
819+
820+ static int etr_shell_read (const struct shell_transport * transport , void * data , size_t length ,
821+ size_t * cnt )
822+ {
823+ uint8_t * buf ;
824+ size_t blen ;
825+ bool buf_available ;
826+
827+ blen = uart_async_rx_data_claim (& async_rx , & buf , length );
828+ memcpy (data , buf , blen );
829+ buf_available = uart_async_rx_data_consume (& async_rx , blen );
830+
831+ * cnt = blen ;
832+ if (pending_rx_req && buf_available ) {
833+ uint8_t * buf = uart_async_rx_buf_req (& async_rx );
834+ size_t len = uart_async_rx_get_buf_len (& async_rx );
835+ int err ;
836+
837+ __ASSERT_NO_MSG (buf != NULL );
838+ atomic_dec (& pending_rx_req );
839+ err = uart_rx_buf_rsp (uart_dev , buf , len );
840+ /* If it is too late and RX is disabled then re-enable it. */
841+ if (err < 0 ) {
842+ if (err == - EACCES ) {
843+ pending_rx_req = 0 ;
844+ err = rx_enable (buf , len );
845+ } else {
846+ return err ;
847+ }
848+ }
849+ }
850+
851+ return 0 ;
852+ }
853+
854+ static int etr_shell_enable (const struct shell_transport * transport , bool blocking_tx )
855+ {
856+ return 0 ;
857+ }
858+
/* Shell transport uninit hook - nothing to tear down for ETR. */
static int etr_shell_uninit(const struct shell_transport *transport)
{
	(void)transport;

	return 0;
}
863+
864+ static int etr_shell_init (const struct shell_transport * transport , const void * config ,
865+ shell_transport_handler_t evt_handler , void * context )
866+ {
867+ int err ;
868+ uint8_t * buf ;
869+ static const struct uart_async_rx_config async_rx_config = {
870+ .buffer = rx_buf ,
871+ .length = sizeof (rx_buf ),
872+ .buf_cnt = CONFIG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_COUNT ,
873+ };
874+
875+ shell_context = context ;
876+ shell_handler = evt_handler ;
877+ err = uart_async_rx_init (& async_rx , & async_rx_config );
878+ if (err ) {
879+ return err ;
880+ }
881+
882+ buf = uart_async_rx_buf_req (& async_rx );
883+
884+ return rx_enable (buf , uart_async_rx_get_buf_len (& async_rx ));
885+ }
886+
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
/* Optional transport update hook - no periodic work needed for ETR. */
static void etr_shell_update(const struct shell_transport *transport)
{
	(void)transport;
}
#endif
892+
893+ const struct shell_transport_api shell_api = {
894+ .init = etr_shell_init ,
895+ .uninit = etr_shell_uninit ,
896+ .enable = etr_shell_enable ,
897+ .write = etr_shell_write ,
898+ .read = etr_shell_read ,
899+ #ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
900+ .update = shell_update ,
901+ #endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
902+ };
903+
/* Transport instance handed to the shell core; ctx is populated by the
 * shell core during shell_init().
 */
static struct shell_transport transport = {
	.api = &shell_api,
	.ctx = NULL,
};
908+
/* Output buffer and static shell instance for the ETR shell backend. */
static uint8_t shell_out_buffer[CONFIG_SHELL_PRINTF_BUFF_SIZE];
Z_SHELL_DEFINE(etr_shell, CONFIG_NRF_ETR_SHELL_PROMPT, &transport, shell_out_buffer, NULL,
	       SHELL_FLAG_OLF_CRLF);
#endif /* CONFIG_NRF_ETR_SHELL */
0 commit comments