@@ -1,8 +1,8 @@
 /*
  * Copyright (c) The mldsa-native project authors
  * Copyright (c) The mlkem-native project authors
- * Copyright (c) 2022 Arm Limited
  * Copyright (c) 2020 Dougall Johnson
+ * Copyright (c) 2022 Arm Limited
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -95,9 +95,62 @@ uint64_t get_cyclecounter(void)
   return retval;
 }
 
-#else /* (!__x86_64__ && __AARCH64EL__) || _M_ARM64 */
-#error PMU_CYCLES option only supported on x86_64 and AArch64
-#endif /* !__x86_64__ && !(__AARCH64EL__ || _M_ARM64) */
+#elif defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8_1M_MAIN__)
+#include <ARMCM55.h>
+#include <system_ARMCM55.h>
+#include "pmu_armv8.h"
+
+void enable_cyclecounter(void)
+{
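+  /* Set DEMCR.TRCENA to enable the trace/debug blocks before using the PMU */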
+  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;
+  ARM_PMU_Enable();
+  ARM_PMU_CYCCNT_Reset();
+  ARM_PMU_CNTR_Enable(PMU_CNTENSET_CCNTR_ENABLE_Msk);
+}
+
+void disable_cyclecounter(void)
+{
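+  /* Stop the cycle counter, then switch the PMU off again */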
+  ARM_PMU_CNTR_Disable(PMU_CNTENCLR_CCNTR_ENABLE_Msk);
+  ARM_PMU_Disable();
+}
+
+uint64_t get_cyclecounter(void) { return ARM_PMU_Get_CCNTR(); }
+
+#elif defined(__riscv)
+
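+/* No setup is needed for rdcycle itself, but the execution environment must
+ * expose the cycle CSR to this privilege level (e.g. via mcounteren.CY) */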
+void enable_cyclecounter(void) {}
+
+void disable_cyclecounter(void) {}
+
+uint64_t get_cyclecounter(void)
+{
+#if (__riscv_xlen == 32)
+  /* RV32: the 64-bit cycle counter is read as two 32-bit CSRs; re-read the
+   * high half to detect a low-half rollover between the two reads */
+  uint32_t lo, hi, hi2;
+  do
+  {
+    __asm__ volatile("rdcycleh %0" : "=r"(hi));
+    __asm__ volatile("rdcycle %0" : "=r"(lo));
+    __asm__ volatile("rdcycleh %0" : "=r"(hi2));
+  } while (hi != hi2);
+  return (((uint64_t)hi) << 32) | ((uint64_t)lo);
+#else /* __riscv_xlen == 32 */
+  uint64_t retval;
+  __asm__ volatile("rdcycle %0" : "=r"(retval));
+  return retval;
+#endif /* __riscv_xlen != 32 */
+}
+
+#else /* !__x86_64__ && !(__AARCH64EL__ || _M_ARM64) && !(__ARM_ARCH_8M_MAIN__ \
+          || __ARM_ARCH_8_1M_MAIN__) && !__riscv */
+#error PMU_CYCLES option only supported on x86_64, AArch64, Armv8-M, and RISC-V
+#endif /* !__x86_64__ && !(__AARCH64EL__ || _M_ARM64) && \
+          !(__ARM_ARCH_8M_MAIN__ || __ARM_ARCH_8_1M_MAIN__) && !__riscv */
 
 #elif defined(PERF_CYCLES)
 
@@ -154,10 +207,8 @@ uint64_t get_cyclecounter(void)
   return (uint64_t)cpu_cycles;
 }
 #elif defined(MAC_CYCLES)
-
 /* Based on @[m1cycles] */
 
-
 #include <dlfcn.h>
 #include <pthread.h>
 #include <stdio.h>