/*
 * Copyright (c) 2023 KNS Group LLC (YADRO)
 * Copyright (c) 2020 Yonatan Goldschmidt <[email protected]>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

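/*
 * Sampling profiler exposed through the shell: a k_timer periodically
 * captures the current stack trace into a flat buffer, and the buffer is
 * later dumped over the shell for offline symbolization.
 *
 * Illustrative session (argument values are examples, not defaults):
 *
 *   uart:~$ perf record 1000 99    # sample for 1000 ms at 99 Hz
 *   uart:~$ perf printbuf          # dump the captured traces
 */
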
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/shell/shell.h>
#include <zephyr/shell/shell_uart.h>
#include <stdio.h>
#include <stdlib.h>

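/*
 * Architecture-specific hook, expected to be provided by the architecture
 * port: writes up to @size entries of the interrupted context's stack trace
 * into @buf and returns the number of entries stored.  The caller below
 * treats a return value of 0 as "trace could not be captured or did not fit".
 */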
size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size);

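/*
 * Profiler state shared between the sampling timer, the delayed work item
 * that ends a recording session, and the shell commands.  buf holds the
 * captured samples, idx is the next free slot, and buf_full is set once a
 * sample no longer fits.
 */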
struct perf_data_t {
	struct k_timer timer;

	const struct shell *sh;

	struct k_work_delayable dwork;

	size_t idx;
	uintptr_t buf[CONFIG_PROFILING_PERF_BUFFER_SIZE];
	bool buf_full;
};

#define PERF_EVENT_TRACING_BUF_OVERFLOW (1 << 0)

static struct perf_data_t perf_data = {
	.idx = 0,
};

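/*
 * Timer callback: captures one stack trace per tick.  Each sample is stored
 * as a length word followed by that many return addresses, so ++idx first
 * reserves the slot for the length, which is filled in once the trace size
 * is known.  If the trace does not fit (or the arch helper returns 0), the
 * reserved slot is released, the timer is stopped and the work item is
 * rescheduled to report the overflow.
 */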
static void perf_tracer(struct k_timer *timer)
{
	struct perf_data_t *perf_data_ptr =
		(struct perf_data_t *)k_timer_user_data_get(timer);

	size_t trace_length = 0;

	if (++perf_data_ptr->idx < CONFIG_PROFILING_PERF_BUFFER_SIZE) {
		trace_length = arch_perf_current_stack_trace(
			perf_data_ptr->buf + perf_data_ptr->idx,
			CONFIG_PROFILING_PERF_BUFFER_SIZE - perf_data_ptr->idx);
	}

	if (trace_length != 0) {
		perf_data_ptr->buf[perf_data_ptr->idx - 1] = trace_length;
		perf_data_ptr->idx += trace_length;
	} else {
		--perf_data_ptr->idx;
		perf_data_ptr->buf_full = true;
		k_timer_stop(timer);
		k_work_reschedule(&perf_data_ptr->dwork, K_NO_WAIT);
	}
}

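/*
 * Runs either when the requested recording duration elapses or immediately
 * after the sampling callback detects an overflow; in the normal case it is
 * the place where the sampling timer is stopped.
 */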
static void perf_dwork_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct perf_data_t *perf_data_ptr = CONTAINER_OF(dwork, struct perf_data_t, dwork);

	if (perf_data_ptr->buf_full) {
		shell_error(perf_data_ptr->sh, "Perf buf overflow!");
	} else {
		k_timer_stop(&perf_data_ptr->timer);
		shell_print(perf_data_ptr->sh, "Perf done!");
	}
}

static int perf_init(void)
{
	k_timer_init(&perf_data.timer, perf_tracer, NULL);
	k_work_init_delayable(&perf_data.dwork, perf_dwork_handler);

	return 0;
}

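/*
 * "perf record <duration> <frequency>": duration is in milliseconds,
 * frequency in Hz; the sampling period is derived as 1e9 / frequency
 * nanoseconds.  A pending delayed work item doubles as the "recording in
 * progress" flag.
 */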
static int cmd_perf_record(const struct shell *sh, size_t argc, char **argv)
{
	if (k_work_delayable_is_pending(&perf_data.dwork)) {
		shell_warn(sh, "Perf is already running");
		return -EINPROGRESS;
	}

	if (perf_data.buf_full) {
		shell_warn(sh, "Perf buffer is full");
		return -ENOBUFS;
	}

	/* Reject zero or negative frequencies to avoid dividing by zero below */
	long long freq = strtoll(argv[2], NULL, 10);

	if (freq <= 0) {
		shell_warn(sh, "Invalid frequency");
		return -EINVAL;
	}

	k_timeout_t duration = K_MSEC(strtoll(argv[1], NULL, 10));
	k_timeout_t period = K_NSEC(1000000000 / freq);

	perf_data.sh = sh;

	k_timer_user_data_set(&perf_data.timer, &perf_data);
	k_timer_start(&perf_data.timer, K_NO_WAIT, period);

	k_work_schedule(&perf_data.dwork, duration);

	shell_print(sh, "Enabled perf");

	return 0;
}

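/*
 * "perf printbuf": dumps the raw buffer, one word per line, in the
 * [length, addr0 .. addrN-1] record format produced by perf_tracer(), for
 * post-processing on the host.  Printing resets the buffer so a new
 * recording can be started.
 */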
static int cmd_perf_print(const struct shell *sh, size_t argc, char **argv)
{
	if (k_work_delayable_is_pending(&perf_data.dwork)) {
		shell_warn(sh, "Perf is still running");
		return -EINPROGRESS;
	}

	shell_print(sh, "Perf buf length %zu", perf_data.idx);
	for (size_t i = 0; i < perf_data.idx; i++) {
		shell_print(sh, "%016lx", perf_data.buf[i]);
	}

	perf_data.idx = 0;
	perf_data.buf_full = false;

	return 0;
}

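/* Handler for a bare "perf" with no subcommand: just print the help text. */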
static int cmd_perf(const struct shell *sh, size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	shell_help(sh);
	return SHELL_CMD_HELP_PRINTED;
}

#define CMD_HELP_RECORD \
	"Start recording for <duration> ms at <frequency> Hz\n" \
	"Usage: record <duration> <frequency>\n"

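/*
 * Shell command tree: "perf record <duration> <frequency>" (the mandatory
 * argument count of 3 covers the subcommand name plus its two arguments)
 * and "perf printbuf" with no arguments.
 */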
SHELL_STATIC_SUBCMD_SET_CREATE(m_sub_perf,
	SHELL_CMD_ARG(record, NULL, CMD_HELP_RECORD, cmd_perf_record, 3, 0),
	SHELL_CMD_ARG(printbuf, NULL, "Print the perf buffer", cmd_perf_print, 0, 0),
	SHELL_SUBCMD_SET_END
);
SHELL_CMD_ARG_REGISTER(perf, &m_sub_perf, "Lightweight profiler", cmd_perf, 0, 0);

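/* Set up the sampling timer and work item once the kernel is up (APPLICATION level). */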
SYS_INIT(perf_init, APPLICATION, 0);