
Commit 83a7a4d

robherring authored and willdeacon committed
arm64: perf: Enable PMU counter userspace access for perf event
Arm PMUs can support direct userspace access of counters which allows for
low overhead (i.e. no syscall) self-monitoring of tasks. The same feature
exists on x86 called 'rdpmc'. Unlike x86, userspace access will only be
enabled for thread bound events. This could be extended if needed, but
simplifies the implementation and reduces the chances for any information
leaks (which the x86 implementation suffers from).

PMU EL0 access will be enabled when an event with userspace access is part
of the thread's context. This includes when the event is not scheduled on
the PMU. There's some additional overhead clearing dirty counters when
access is enabled in order to prevent leaking disabled counter data from
other tasks.

Unlike x86, enabling of userspace access must be requested with a new attr
bit: config1:1. If the user requests userspace access with 64-bit counters,
then the event open will fail if the h/w doesn't support 64-bit counters.
Chaining is not supported with userspace access. The modes for config1 are
as follows:

config1 = 0 : user access disabled and always 32-bit
config1 = 1 : user access disabled and always 64-bit (using chaining if needed)
config1 = 2 : user access enabled and always 32-bit
config1 = 3 : user access enabled and always 64-bit

Based on work by Raphael Gault <[email protected]>, but has been completely
re-written.

Cc: Will Deacon <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Rob Herring <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
[will: Made armv8pmu_proc_user_access_handler() static]
Signed-off-by: Will Deacon <[email protected]>
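Illustration, not part of the commit: a self-monitoring task would request this behaviour by opening a thread-bound event with the new config1 bits. A minimal sketch; the raw event number and helper name are placeholders, not anything this patch defines:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Hypothetical helper: open a thread-bound event with userspace access.
 * config1 = 3 requests user access (bit 1) plus a 64-bit counter (bit 0);
 * per the commit, such an open may fail with -EOPNOTSUPP if the PMU lacks
 * long counters and the event can't use the 64-bit cycle counter. */
static int open_user_access_event(unsigned long long hw_event)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = hw_event;		/* PMU event number, hardware specific */
	attr.config1 = 0x3;		/* user access enabled, always 64-bit */
	attr.exclude_kernel = 1;

	/* pid = 0, cpu = -1: thread-bound, as arm64 userspace access
	 * requires; a CPU-bound open is rejected with -EINVAL. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}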
1 parent e201260 commit 83a7a4d

File tree

1 file changed

+112 -7 lines changed


arch/arm64/kernel/perf_event.c

Lines changed: 112 additions & 7 deletions
@@ -285,6 +285,7 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = {
 
 PMU_FORMAT_ATTR(event, "config:0-15");
 PMU_FORMAT_ATTR(long, "config1:0");
+PMU_FORMAT_ATTR(rdpmc, "config1:1");
 
 static int sysctl_perf_user_access __read_mostly;
 
@@ -293,9 +294,15 @@ static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
 	return event->attr.config1 & 0x1;
 }
 
+static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
+{
+	return event->attr.config1 & 0x2;
+}
+
 static struct attribute *armv8_pmuv3_format_attrs[] = {
 	&format_attr_event.attr,
 	&format_attr_long.attr,
+	&format_attr_rdpmc.attr,
 	NULL,
 };
 
@@ -364,7 +371,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
  */
 #define ARMV8_IDX_CYCLE_COUNTER	0
 #define ARMV8_IDX_COUNTER0	1
-
+#define ARMV8_IDX_CYCLE_COUNTER_USER	32
 
 /*
  * We unconditionally enable ARMv8.5-PMU long event counter support
@@ -376,18 +383,22 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
 	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
 }
 
+static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
+{
+	return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
+}
+
 /*
  * We must chain two programmable counters for 64 bit events,
  * except when we have allocated the 64bit cycle counter (for CPU
- * cycles event). This must be called only when the event has
- * a counter allocated.
+ * cycles event) or when user space counter access is enabled.
  */
 static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 {
 	int idx = event->hw.idx;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 
-	return !WARN_ON(idx < 0) &&
+	return !armv8pmu_event_has_user_read(event) &&
 	       armv8pmu_event_is_64bit(event) &&
 	       !armv8pmu_has_long_event(cpu_pmu) &&
 	       (idx != ARMV8_IDX_CYCLE_COUNTER);
@@ -720,6 +731,28 @@ static inline u32 armv8pmu_getreset_flags(void)
 	return value;
 }
 
+static void armv8pmu_disable_user_access(void)
+{
+	write_sysreg(0, pmuserenr_el0);
+}
+
+static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
+{
+	int i;
+	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/* Clear any unused counters to avoid leaking their contents */
+	for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
+		if (i == ARMV8_IDX_CYCLE_COUNTER)
+			write_sysreg(0, pmccntr_el0);
+		else
+			armv8pmu_write_evcntr(i, 0);
+	}
+
+	write_sysreg(0, pmuserenr_el0);
+	write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
+}
+
 static void armv8pmu_enable_event(struct perf_event *event)
 {
 	/*
@@ -763,6 +796,14 @@ static void armv8pmu_disable_event(struct perf_event *event)
 
 static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 {
+	struct perf_event_context *task_ctx =
+		this_cpu_ptr(cpu_pmu->pmu.pmu_cpu_context)->task_ctx;
+
+	if (sysctl_perf_user_access && task_ctx && task_ctx->nr_user)
+		armv8pmu_enable_user_access(cpu_pmu);
+	else
+		armv8pmu_disable_user_access();
+
 	/* Enable all counters */
 	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
 }
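(Note: task_ctx->nr_user is perf core's count of events in the context carrying PERF_EVENT_FLAG_USER_READ_CNT, added by a companion perf core change. EL0 access is therefore toggled whenever such a task schedules in or out, even while the event itself is not on the PMU.)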
@@ -880,13 +921,16 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
 		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return ARMV8_IDX_CYCLE_COUNTER;
+		else if (armv8pmu_event_is_64bit(event) &&
+			 armv8pmu_event_want_user_access(event) &&
+			 !armv8pmu_has_long_event(cpu_pmu))
+			return -EAGAIN;
 	}
 
 	/*
 	 * Otherwise use events counters
 	 */
-	if (armv8pmu_event_is_64bit(event) &&
-	    !armv8pmu_has_long_event(cpu_pmu))
+	if (armv8pmu_event_is_chained(event))
 		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
 	else
 		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
@@ -902,6 +946,22 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 		clear_bit(idx - 1, cpuc->used_mask);
 }
 
+static int armv8pmu_user_event_idx(struct perf_event *event)
+{
+	if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
+		return 0;
+
+	/*
+	 * We remap the cycle counter index to 32 to
+	 * match the offset applied to the rest of
+	 * the counter indices.
+	 */
+	if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
+		return ARMV8_IDX_CYCLE_COUNTER_USER;
+
+	return event->hw.idx;
+}
+
 /*
  * Add an event filter to a given event.
  */
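Illustration, not part of the commit: the remapped value is what lands in the mmap page's "index" field, so a userspace reader (mirroring what the perf tool's libperf does on arm64) treats 32 as the cycle counter and index - 1 as the event counter number. A minimal sketch, assuming the kernel has set PMUSERENR_EL0.{ER,CR} as above:

#include <stdint.h>

/* Read the raw hardware counter named by the mmap page's "index" field. */
static inline uint64_t arm64_pmu_raw_read(uint32_t index)
{
	uint64_t val;

	if (index == 32) {	/* ARMV8_IDX_CYCLE_COUNTER_USER */
		__asm__ volatile("mrs %0, pmccntr_el0" : "=r" (val));
	} else {
		/* Event counters: select, synchronize, then read. */
		__asm__ volatile("msr pmselr_el0, %0" : : "r" ((uint64_t)(index - 1)));
		__asm__ volatile("isb" : : : "memory");
		__asm__ volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
	}
	return val;
}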
@@ -998,6 +1058,25 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
 	if (armv8pmu_event_is_64bit(event))
 		event->hw.flags |= ARMPMU_EVT_64BIT;
 
+	/*
+	 * User events must be allocated into a single counter, and so
+	 * must not be chained.
+	 *
+	 * Most 64-bit events require long counter support, but 64-bit
+	 * CPU_CYCLES events can be placed into the dedicated cycle
+	 * counter when this is free.
+	 */
+	if (armv8pmu_event_want_user_access(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			return -EINVAL;
+		if (armv8pmu_event_is_64bit(event) &&
+		    (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
+		    !armv8pmu_has_long_event(armpmu))
+			return -EOPNOTSUPP;
+
+		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
+	}
+
 	/* Only expose micro/arch events supported by this PMU */
 	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
 	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
@@ -1106,13 +1185,29 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 	return probe.present ? 0 : -ENODEV;
 }
 
+static void armv8pmu_disable_user_access_ipi(void *unused)
+{
+	armv8pmu_disable_user_access();
+}
+
+static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write || sysctl_perf_user_access)
+		return ret;
+
+	on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
+	return 0;
+}
+
 static struct ctl_table armv8_pmu_sysctl_table[] = {
 	{
 		.procname	= "perf_user_access",
 		.data		= &sysctl_perf_user_access,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= armv8pmu_proc_user_access_handler,
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= SYSCTL_ONE,
 	},
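(None of this is reachable until an administrator enables the perf_user_access sysctl, which is zero-initialized and therefore off by default. Clearing it again IPIs every CPU to zero PMUSERENR_EL0, revoking EL0 counter access immediately rather than waiting for the next context switch.)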
@@ -1142,6 +1237,8 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
 	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
 	cpu_pmu->filter_match		= armv8pmu_filter_match;
 
+	cpu_pmu->pmu.event_idx		= armv8pmu_user_event_idx;
+
 	cpu_pmu->name			= name;
 	cpu_pmu->map_event		= map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
@@ -1318,6 +1415,14 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
 	userpg->cap_user_time_short = 0;
+	userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);
+
+	if (userpg->cap_user_rdpmc) {
+		if (event->hw.flags & ARMPMU_EVT_64BIT)
+			userpg->pmc_width = 64;
+		else
+			userpg->pmc_width = 32;
+	}
 
 	do {
 		rd = sched_clock_read_begin(&seq);
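Illustration, not part of the commit: the userpage fields above feed the standard perf mmap self-monitoring loop. A minimal sketch, assuming the hypothetical arm64_pmu_raw_read() helper from earlier and pc pointing at the event's mmap'd first page:

#include <stdint.h>
#include <linux/perf_event.h>

/* Seqlock read loop over the event's mmap'd control page. */
static uint64_t self_monitor_read(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	int64_t count, pmc;

	do {
		seq = pc->lock;
		/* Compiler barrier, as in the perf_event.h example loop. */
		__asm__ volatile("" : : : "memory");

		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx) {
			/* Sign-extend the raw value from pmc_width bits. */
			pmc = (int64_t)arm64_pmu_raw_read(idx);
			pmc <<= 64 - pc->pmc_width;
			pmc >>= 64 - pc->pmc_width;
			count += pmc;
		}

		__asm__ volatile("" : : : "memory");
	} while (pc->lock != seq);

	return (uint64_t)count;
}

With a 32-bit counter (config1 bit 0 clear), pmc_width is 32 and the sign extension makes the short hardware counter combine correctly with the kernel-maintained offset; with 64-bit counters the shifts are no-ops.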
