// SPDX-License-Identifier: GPL-2.0-only
/*
 * sbi_pmu_test.c - Tests the riscv64 SBI PMU functionality.
 *
 * Copyright (c) 2024, Rivos Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
#include "sbi.h"

/* Maximum counters (firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];

/* Cache the available counters in a bitmask */
static unsigned long counter_mask_available;

static bool illegal_handler_invoked;

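/*
 * csr_read() encodes the CSR number as an immediate, so it needs a
 * compile-time constant. The macro ladder below therefore expands into a
 * switch with one case per counter CSR. Illustrative expansion (not part
 * of the build):
 *
 *	switchcase_csr_read_2(CSR_CYCLE, ret)
 * becomes
 *	case CSR_CYCLE + 0: ret = csr_read(CSR_CYCLE + 0); break;
 *	case CSR_CYCLE + 1: ret = csr_read(CSR_CYCLE + 1); break;
 *
 * The _32 variants cover the CSR_CYCLE..CSR_HPMCOUNTER31 range, plus the
 * CSR_CYCLEH range used for the upper 32 bits on rv32.
 */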
unsigned long pmu_csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)	{\
	case __csr_num: \
		__val = csr_read(__csr_num); \
		break; }
#define switchcase_csr_read_2(__csr_num, __val)	{\
	switchcase_csr_read(__csr_num + 0, __val) \
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)	{\
	switchcase_csr_read_2(__csr_num + 0, __val) \
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)	{\
	switchcase_csr_read_4(__csr_num + 0, __val) \
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val) \
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val) \
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

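/*
 * Burn a bounded number of instructions so that hardware counters such as
 * cycle/instret observably advance between start_counter() and
 * stop_counter().
 */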
static inline void dummy_func_loop(uint64_t iter)
{
	uint64_t i = 0;

	while (i < iter) {
		asm volatile("nop");
		i++;
	}
}

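/*
 * For the single-counter SBI calls below, counter_idx_base = counter and
 * counter_idx_mask = 1 select exactly one counter; ival is honored only
 * when SBI_PMU_START_FLAG_SET_INIT_VALUE is set in start_flags.
 */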
static void start_counter(unsigned long counter, unsigned long start_flags,
			  unsigned long ival)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
			ival, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
}

/*
 * Only for the reset use case: the counter is expected to be stopped
 * already, so the call returns SBI_ERR_ALREADY_STOPPED while still
 * applying the reset.
 */
static void stop_reset_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1,
			stop_flags | SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	__GUEST_ASSERT(ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unexpected error %ld while resetting counter %ld\n",
		       ret.error, counter);
}

static void stop_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
			0, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to stop counter %ld, error %ld\n",
		       counter, ret.error);
}

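/*
 * Reading a counter CSR that has not been made accessible to the guest is
 * expected to trap, surfacing here as an illegal-instruction exception.
 * CSR instructions have no compressed encoding, so the trapping
 * instruction is always 4 bytes and epc can be advanced by a fixed amount.
 */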
static void guest_illegal_exception_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(regs->cause == EXC_INST_ILLEGAL,
		       "Unexpected trap cause %lx\n", regs->cause);

	illegal_handler_invoked = true;
	/* skip the trapping instruction */
	regs->epc += 4;
}

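/*
 * COUNTER_CFG_MATCH asks the SBI implementation to pick a counter from the
 * set described by cbase/cmask that can monitor @event, configure it, and
 * return its logical index.
 */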
static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
				       unsigned long cflags,
				       unsigned long event)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, event, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "config matching failed %ld\n", ret.error);
	GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);
	GUEST_ASSERT(BIT(ret.value) & counter_mask_available);

	return ret.value;
}

static unsigned long get_num_counters(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);

	__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
	__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS,
		       "Invalid number of counters %ld\n", ret.value);

	return ret.value;
}

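/*
 * Cache each counter's info word in ctrinfo_arr[] and record the usable
 * logical indices in counter_mask_available; the event tests later index
 * ctrinfo_arr[] by the counter returned from COUNTER_CFG_MATCH.
 */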
static void update_counter_info(int num_counters)
{
	int i = 0;
	struct sbiret ret;

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);

		/* There can be gaps in the logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo_arr[i].value = ret.value;
		counter_mask_available |= BIT(i);
	}

	GUEST_ASSERT(counter_mask_available > 0);
}

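/*
 * Per the SBI PMU spec, counter_info packs the CSR number in the low 12
 * bits, the counter width next, and the counter type (hardware vs.
 * firmware) in the most significant bit; union sbi_pmu_ctr_info mirrors
 * that layout. Firmware counters have no backing CSR and can only be read
 * through the COUNTER_FW_READ call below.
 */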
static unsigned long read_fw_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, idx, 0, 0, 0, 0, 0);
	GUEST_ASSERT(ret.error == 0);
	return ret.value;
}

static unsigned long read_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	unsigned long counter_val = 0;

	__GUEST_ASSERT(ctrinfo.type < 2, "Invalid counter type %d", ctrinfo.type);

	if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW)
		counter_val = pmu_csr_read_num(ctrinfo.csr);
	else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW)
		counter_val = read_fw_counter(idx, ctrinfo);

	return counter_val;
}

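/*
 * End-to-end check for a single event: the counter must advance across a
 * workload, accept being reinitialized to a lower value, and count upward
 * from an explicit initial value.
 */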
static void test_pmu_event(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * A counter's value can only be updated while starting it, so
	 * start/stop twice: first initialize it to a very high value, then to
	 * a low one, and verify that the second (lower) value took effect.
	 */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX / 2);
	stop_counter(counter, 0);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	stop_counter(counter, 0);
	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] init [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

static void test_invalid_event(void)
{
	struct sbiret ret;
	unsigned long event = 0x1234; /* A random invalid event */

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, 0,
			counter_mask_available, 0, event, 0, 0);
	GUEST_ASSERT_EQ(ret.error, SBI_ERR_NOT_SUPPORTED);
}

static void test_pmu_events(void)
{
	int num_counters = 0;

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Sanity testing for any random invalid event */
	test_invalid_event();

	/* Only these two events are guaranteed to be present */
	test_pmu_event(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_basic_sanity(void)
{
	long out_val = 0;
	bool probe;
	struct sbiret ret;
	int num_counters = 0, i;
	union sbi_pmu_ctr_info ctrinfo;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	num_counters = get_num_counters();

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
				0, 0, 0, 0, 0);

		/* There can be gaps in the logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo.value = ret.value;

		/*
		 * Accessibility check of hardware counters and read capability
		 * of firmware counters. The spec doesn't mandate any initial
		 * value, so no value checks are done.
		 */
		if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
			pmu_csr_read_num(ctrinfo.csr);
			GUEST_ASSERT(illegal_handler_invoked);
		} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
			read_fw_counter(i, ctrinfo);
		}
	}

	GUEST_DONE();
}

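/*
 * Host side of the guest/host handshake: GUEST_ASSERT failures arrive as
 * UCALL_ABORT and GUEST_DONE as UCALL_DONE, so a single vcpu_run() drives
 * each guest test function to completion.
 */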
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
	case UCALL_SYNC:
		break;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
		break;
	}
}

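/*
 * Reset the host-side copies of the guest globals between VM instances so
 * that a later test cannot observe stale counter state (defensive; the
 * guest image itself is reloaded for each new VM).
 */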
void test_vm_destroy(struct kvm_vm *vm)
{
	memset(ctrinfo_arr, 0, sizeof(union sbi_pmu_ctr_info) * RISCV_MAX_PMU_COUNTERS);
	counter_mask_available = 0;
	kvm_vm_free(vm);
}

static void test_vm_basic_test(void *guest_code)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	vm_init_vector_tables(vm);
	/* An illegal-instruction handler is required to verify read access without configuration */
	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, guest_illegal_exception_handler);

	vcpu_init_vector_tables(vcpu);
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu = NULL;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

int main(void)
{
	test_vm_basic_test(test_pmu_basic_sanity);
	pr_info("SBI PMU basic test: PASS\n");

	test_vm_events_test(test_pmu_events);
	pr_info("SBI PMU event verification test: PASS\n");

	return 0;
}