|
22 | 22 | #include <dmm.h> |
23 | 23 | #include <nrfx_tbm.h> |
24 | 24 | #include <stdio.h> |
| 25 | + |
| 26 | +#ifdef CONFIG_DEBUG_NRF_ETR_BACKEND_RTT |
| 27 | +#include <SEGGER_RTT.h> |
| 28 | +#endif |
| 29 | + |
25 | 30 | LOG_MODULE_REGISTER(cs_etr_tbm); |
26 | 31 |
|
27 | 32 | #define UART_NODE DT_CHOSEN(zephyr_console) |
@@ -79,7 +84,7 @@ static uint32_t etr_rd_idx; |
79 | 84 | /* Counts number of new messages completed in the current formatter frame decoding. */ |
80 | 85 | static uint32_t new_msg_cnt; |
81 | 86 |
|
82 | | -static bool volatile use_async_uart; |
| 87 | +static bool volatile use_blocking; |
83 | 88 |
|
84 | 89 | static struct k_sem uart_sem; |
85 | 90 | static const struct device *uart_dev = DEVICE_DT_GET(UART_NODE); |
@@ -149,27 +154,101 @@ static shell_transport_handler_t shell_handler; |
149 | 154 | static void *shell_context; |
150 | 155 | #endif |
151 | 156 |
|
#ifdef CONFIG_DEBUG_NRF_ETR_BACKEND_RTT

/* Take the global RTT lock only when writing to buffer 0.
 * NOTE(review): presumably buffer 0 is shared with other RTT users (e.g. the
 * default console) while a dedicated buffer has a single writer - confirm.
 */
#define RTT_LOCK() \
	COND_CODE_0(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER, (SEGGER_RTT_LOCK()), ())

#define RTT_UNLOCK() \
	COND_CODE_0(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER, (SEGGER_RTT_UNLOCK()), ())

/* Backing storage for the RTT up-buffer. When buffer 0 is selected it is
 * already provisioned by SEGGER RTT, so only a 1-byte placeholder is kept.
 */
static uint8_t rtt_buf[COND_CODE_0(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER, (1),
				   (CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER_SIZE))];

/* True while a host appears to be draining the RTT up-buffer; cleared by
 * rtt_on_failed_write() once the retry budget is exhausted.
 */
static volatile bool rtt_host_present;
| 170 | +static void rtt_on_failed_write(int retry_cnt, bool in_panic) |
| 171 | +{ |
| 172 | + if (retry_cnt == 0) { |
| 173 | + rtt_host_present = false; |
| 174 | + } else if (in_panic) { |
| 175 | + k_busy_wait(USEC_PER_MSEC * CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_RETRY_DELAY_MS); |
| 176 | + } else { |
| 177 | + k_msleep(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_RETRY_DELAY_MS); |
| 178 | + } |
| 179 | +} |
| 180 | + |
| 181 | +static void rtt_on_write(int retry_cnt, bool in_panic) |
| 182 | +{ |
| 183 | + rtt_host_present = true; |
| 184 | + if (use_blocking) { |
| 185 | + /* In panic mode block on each write until host reads it. This |
| 186 | + * way it is ensured that if system resets all messages are read |
| 187 | + * by the host. While pending on data being read by the host we |
| 188 | + * must also detect situation where host is disconnected. |
| 189 | + */ |
| 190 | + while (SEGGER_RTT_HasDataUp(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER)) { |
| 191 | + rtt_on_failed_write(retry_cnt--, in_panic); |
| 192 | + } |
| 193 | + } |
| 194 | + |
| 195 | +} |
| 196 | + |
| 197 | +static void rtt_write(uint8_t *data, size_t length, bool in_panic) |
| 198 | +{ |
| 199 | + int ret = 0; |
| 200 | + int retry_cnt = CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_RETRY_CNT; |
| 201 | + |
| 202 | + do { |
| 203 | + if (!in_panic) { |
| 204 | + RTT_LOCK(); |
| 205 | + ret = SEGGER_RTT_WriteSkipNoLock(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER, |
| 206 | + data, length); |
| 207 | + RTT_UNLOCK(); |
| 208 | + } else { |
| 209 | + ret = SEGGER_RTT_WriteSkipNoLock(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER, |
| 210 | + data, length); |
| 211 | + } |
| 212 | + |
| 213 | + if (ret) { |
| 214 | + rtt_on_write(retry_cnt, in_panic); |
| 215 | + } else if (rtt_host_present) { |
| 216 | + retry_cnt--; |
| 217 | + rtt_on_failed_write(retry_cnt, in_panic); |
| 218 | + } else { |
| 219 | + } |
| 220 | + } while ((ret == 0) && rtt_host_present); |
| 221 | +} |
| 222 | +#endif /* CONFIG_DEBUG_NRF_ETR_BACKEND_RTT */ |
| 223 | + |
152 | 224 | static int log_output_func(uint8_t *buf, size_t size, void *ctx) |
153 | 225 | { |
154 | | - if (use_async_uart) { |
155 | | - int err; |
156 | | - static uint8_t *tx_buf = (uint8_t *)frame_buf0; |
| 226 | + ARG_UNUSED(ctx); |
157 | 227 |
|
158 | | - err = k_sem_take(&uart_sem, K_FOREVER); |
159 | | - __ASSERT_NO_MSG(err >= 0); |
| 228 | + if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_BACKEND_UART)) { |
| 229 | + if (use_blocking) { |
| 230 | + for (int i = 0; i < size; i++) { |
| 231 | + uart_poll_out(uart_dev, buf[i]); |
| 232 | + } |
| 233 | + } else { |
| 234 | + int err; |
| 235 | + static uint8_t *tx_buf = (uint8_t *)frame_buf0; |
160 | 236 |
|
161 | | - memcpy(tx_buf, buf, size); |
| 237 | + err = k_sem_take(&uart_sem, K_FOREVER); |
| 238 | + __ASSERT_NO_MSG(err >= 0); |
162 | 239 |
|
163 | | - err = uart_tx(uart_dev, tx_buf, size, SYS_FOREVER_US); |
164 | | - __ASSERT_NO_MSG(err >= 0); |
| 240 | + memcpy(tx_buf, buf, size); |
165 | 241 |
|
166 | | - tx_buf = (tx_buf == (uint8_t *)frame_buf0) ? |
167 | | - (uint8_t *)frame_buf1 : (uint8_t *)frame_buf0; |
168 | | - } else { |
169 | | - for (int i = 0; i < size; i++) { |
170 | | - uart_poll_out(uart_dev, buf[i]); |
| 242 | + err = uart_tx(uart_dev, tx_buf, size, SYS_FOREVER_US); |
| 243 | + __ASSERT_NO_MSG(err >= 0); |
| 244 | + |
| 245 | + tx_buf = (tx_buf == (uint8_t *)frame_buf0) ? |
| 246 | + (uint8_t *)frame_buf1 : (uint8_t *)frame_buf0; |
171 | 247 | } |
172 | 248 | } |
| 249 | +#ifdef CONFIG_DEBUG_NRF_ETR_BACKEND_RTT |
| 250 | + rtt_write(buf, size, use_blocking); |
| 251 | +#endif |
173 | 252 |
|
174 | 253 | return size; |
175 | 254 | } |
@@ -528,16 +607,16 @@ static void dump_frame(uint8_t *buf) |
528 | 607 | { |
529 | 608 | int err; |
530 | 609 |
|
531 | | - if (use_async_uart) { |
| 610 | + if (use_blocking) { |
| 611 | + for (int i = 0; i < CORESIGHT_TRACE_FRAME_SIZE; i++) { |
| 612 | + uart_poll_out(uart_dev, buf[i]); |
| 613 | + } |
| 614 | + } else { |
532 | 615 | err = k_sem_take(&uart_sem, K_FOREVER); |
533 | 616 | __ASSERT_NO_MSG(err >= 0); |
534 | 617 |
|
535 | 618 | err = uart_tx(uart_dev, buf, CORESIGHT_TRACE_FRAME_SIZE, SYS_FOREVER_US); |
536 | 619 | __ASSERT_NO_MSG(err >= 0); |
537 | | - } else { |
538 | | - for (int i = 0; i < CORESIGHT_TRACE_FRAME_SIZE; i++) { |
539 | | - uart_poll_out(uart_dev, buf[i]); |
540 | | - } |
541 | 620 | } |
542 | 621 | } |
543 | 622 |
|
@@ -593,7 +672,7 @@ static void process(void) |
593 | 672 | } |
594 | 673 | } else { |
595 | 674 | dump_frame((uint8_t *)frame_buf); |
596 | | - frame_buf = (use_async_uart && (frame_buf == frame_buf0)) ? |
| 675 | + frame_buf = (!use_blocking && (frame_buf == frame_buf0)) ? |
597 | 676 | frame_buf1 : frame_buf0; |
598 | 677 | } |
599 | 678 | } |
@@ -649,7 +728,7 @@ void debug_nrf_etr_flush(void) |
649 | 728 | /* Set flag which forces uart to use blocking polling out instead of |
650 | 729 | * asynchronous API. |
651 | 730 | */ |
652 | | - use_async_uart = false; |
| 731 | + use_blocking = true; |
653 | 732 | uint32_t k = irq_lock(); |
654 | 733 |
|
655 | 734 | /* Repeat arbitrary number of times to ensure that all that is flushed. */ |
@@ -754,17 +833,23 @@ static void tbm_event_handler(nrf_tbm_event_t event) |
754 | 833 |
|
755 | 834 | int etr_process_init(void) |
756 | 835 | { |
| 836 | +#ifdef CONFIG_DEBUG_NRF_ETR_BACKEND_RTT |
| 837 | + if (CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER > 0) { |
| 838 | + SEGGER_RTT_ConfigUpBuffer(CONFIG_DEBUG_NRF_ETR_BACKEND_RTT_BUFFER, "stm_logger", |
| 839 | + rtt_buf, sizeof(rtt_buf), |
| 840 | + SEGGER_RTT_MODE_NO_BLOCK_SKIP); |
| 841 | + } |
| 842 | +#endif |
757 | 843 | int err; |
758 | 844 |
|
759 | | - k_sem_init(&uart_sem, 1, 1); |
760 | | - |
761 | | - err = uart_callback_set(uart_dev, uart_event_handler, NULL); |
762 | | - use_async_uart = (err == 0); |
763 | | - |
| 845 | + if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_BACKEND_UART)) { |
| 846 | + err = uart_callback_set(uart_dev, uart_event_handler, NULL); |
| 847 | + use_blocking = (err != 0); |
| 848 | + k_sem_init(&uart_sem, 1, 1); |
| 849 | + } |
764 | 850 | static const nrfx_tbm_config_t config = {.size = wsize_mask}; |
765 | 851 |
|
766 | 852 | nrfx_tbm_init(&config, tbm_event_handler); |
767 | | - |
768 | 853 | IRQ_CONNECT(DT_IRQN(DT_NODELABEL(tbm)), DT_IRQ(DT_NODELABEL(tbm), priority), |
769 | 854 | nrfx_isr, nrfx_tbm_irq_handler, 0); |
770 | 855 | irq_enable(DT_IRQN(DT_NODELABEL(tbm))); |
|
0 commit comments