@@ -39,6 +39,24 @@
 #include <asm/mmu.h>
 #include <asm/sysreg.h>
 
+/*
+ * Cache if the event is allowed to trace Context information.
+ * This allows us to perform the check, i.e, perfmon_capable(),
+ * in the context of the event owner, once, during the event_init().
+ */
+#define SPE_PMU_HW_FLAGS_CX			BIT(0)
+
+static void set_spe_event_has_cx(struct perf_event *event)
+{
+	if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+		event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+}
+
+static bool get_spe_event_has_cx(struct perf_event *event)
+{
+	return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
+}
+
 #define ARM_SPE_BUF_PAD_BYTE			0
 
 struct arm_spe_pmu_buf {
@@ -272,7 +290,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
 	if (!attr->exclude_kernel)
 		reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
 
-	if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+	if (get_spe_event_has_cx(event))
 		reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
 
 	return reg;
@@ -709,10 +727,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
 	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
 		return -EOPNOTSUPP;
 
+	set_spe_event_has_cx(event);
 	reg = arm_spe_event_to_pmscr(event);
 	if (!perfmon_capable() &&
 	    (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
-		    BIT(SYS_PMSCR_EL1_CX_SHIFT) |
 		    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
 		return -EACCES;
 