@@ -20,6 +20,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/sched/clock.h>
 #include <linux/soc/andes/irq.h>
+#include <linux/workqueue.h>
 
 #include <asm/errata_list.h>
 #include <asm/sbi.h>
@@ -114,7 +115,7 @@ struct sbi_pmu_event_data {
 	};
 };
 
-static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
+static struct sbi_pmu_event_data pmu_hw_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES]	= {.hw_gen_event = {
 							SBI_PMU_HW_CPU_CYCLES,
 							SBI_PMU_EVENT_TYPE_HW, 0}},
@@ -148,7 +149,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
 };
 
 #define C(x) PERF_COUNT_HW_CACHE_##x
-static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
+static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
 [PERF_COUNT_HW_CACHE_OP_MAX]
 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	[C(L1D)] = {
@@ -293,6 +294,34 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
 	},
 };
 
+static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
+			0, cmask, 0, edata->event_idx, 0, 0);
+	if (!ret.error) {
+		sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
+			  ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
+	} else if (ret.error == SBI_ERR_NOT_SUPPORTED) {
+		/* This event cannot be monitored by any counter */
+		edata->event_idx = -EINVAL;
+	}
+}
+
+static void pmu_sbi_check_std_events(struct work_struct *work)
+{
+	for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+		pmu_sbi_check_event(&pmu_hw_event_map[i]);
+
+	for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++)
+		for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++)
+			for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+				pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]);
+}
+
+static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events);
+
 static int pmu_sbi_ctr_get_width(int idx)
 {
 	return pmu_ctr_list[idx].width;
@@ -478,6 +507,12 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
 	u64 raw_config_val;
 	int ret;
 
+	/*
+	 * Ensure we are finished checking standard hardware events for
+	 * validity before allowing userspace to configure any events.
+	 */
+	flush_work(&check_std_events_work);
+
 	switch (type) {
 	case PERF_TYPE_HARDWARE:
 		if (config >= PERF_COUNT_HW_MAX)
@@ -762,7 +797,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
 	 * which may include counters that are not enabled yet.
 	 */
 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
-		  0, pmu->cmask, 0, 0, 0, 0);
+		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
 }
 
 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -1359,6 +1394,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_unregister;
 
+	/* Asynchronously check which standard events are available */
+	schedule_work(&check_std_events_work);
+
 	return 0;
 
 out_unregister:
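Note (not part of the patch above): the patch defers the per-event availability probing to the system workqueue via DECLARE_WORK()/schedule_work(), and the first path that needs the results synchronizes with flush_work(). The following is a minimal standalone sketch of that same deferred-check pattern; the demo_* names are hypothetical and not taken from this driver.

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static int demo_value;

/* Slow, one-time probing runs asynchronously on the system workqueue. */
static void demo_work_fn(struct work_struct *work)
{
	demo_value = 42;
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* Kick off the check without blocking init/probe. */
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* A consumer waits for the deferred work before using its result. */
	flush_work(&demo_work);
	pr_info("demo_value = %d\n", demo_value);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");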