
Commit 29227d6

Browse files
rmurphy-arm authored and willdeacon committed
arm64: perf: Clean up enable/disable calls
Reading this code bordered on painful, what with all the repetition and pointless return values. More fundamentally, dribbling the hardware enables and disables in one bit at a time incurs needless system register overhead for chained events and on reset.

We already use bitmask values for the KVM hooks, so consolidate all the register accesses to match, and make a reasonable saving in both source and object code.

Signed-off-by: Robin Murphy <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent 0623682 commit 29227d6
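For illustration only (not part of the commit): a minimal userspace C sketch of the mask-based approach the patch adopts. The struct fake_event, write_pmcntenset() and sysreg_writes names are invented for this example, the "system register" write is simulated with an ordinary function, and the counter bit is taken directly from the event index rather than going through ARMV8_IDX_TO_COUNTER() as the kernel does. The point is simply that a chained event's two counter bits can be combined into one mask and programmed with a single write.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's event layout and sysreg access. */
struct fake_event {
	int idx;       /* counter bit used by this event */
	bool chained;  /* does the event also own the adjacent low counter? */
};

static unsigned int sysreg_writes;  /* how many "register writes" were issued */

/* Simulated write to a PMCNTENSET-style enable register. */
static void write_pmcntenset(uint32_t mask)
{
	sysreg_writes++;
	printf("pmcntenset <- 0x%08" PRIx32 "\n", mask);
}

/* Build one enable mask covering the event and, if chained, its low half. */
static uint32_t event_cnten_mask(const struct fake_event *ev)
{
	uint32_t mask = UINT32_C(1) << ev->idx;

	if (ev->chained)
		mask |= UINT32_C(1) << (ev->idx - 1);
	return mask;
}

int main(void)
{
	struct fake_event chained = { .idx = 5, .chained = true };

	/* One register write enables both halves of the chained event... */
	write_pmcntenset(event_cnten_mask(&chained));

	/* ...where enabling one index at a time would have needed two. */
	printf("writes issued: %u\n", sysreg_writes);
	return 0;
}

The same property is what lets armv8pmu_reset() in the diff below replace its per-counter loop with single U32_MAX writes to the clear registers.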

File tree

1 file changed: +35 -52 lines

arch/arm64/kernel/perf_event.c

Lines changed: 35 additions & 52 deletions
@@ -450,86 +450,74 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
 	}
 }
 
-static inline int armv8pmu_enable_counter(int idx)
+static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmcntenset_el0);
-	return idx;
+	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	u32 mask = BIT(counter);
+
+	if (armv8pmu_event_is_chained(event))
+		mask |= BIT(counter - 1);
+	return mask;
+}
+
+static inline void armv8pmu_enable_counter(u32 mask)
+{
+	write_sysreg(mask, pmcntenset_el0);
 }
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
-	int idx = event->hw.idx;
-	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
-
-	if (armv8pmu_event_is_chained(event))
-		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+	u32 mask = armv8pmu_event_cnten_mask(event);
 
-	kvm_set_pmu_events(counter_bits, attr);
+	kvm_set_pmu_events(mask, attr);
 
 	/* We rely on the hypervisor switch code to enable guest counters */
-	if (!kvm_pmu_counter_deferred(attr)) {
-		armv8pmu_enable_counter(idx);
-		if (armv8pmu_event_is_chained(event))
-			armv8pmu_enable_counter(idx - 1);
-	}
+	if (!kvm_pmu_counter_deferred(attr))
+		armv8pmu_enable_counter(mask);
 }
 
-static inline int armv8pmu_disable_counter(int idx)
+static inline void armv8pmu_disable_counter(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmcntenclr_el0);
-	return idx;
+	write_sysreg(mask, pmcntenclr_el0);
 }
 
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
 	struct perf_event_attr *attr = &event->attr;
-	int idx = hwc->idx;
-	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
+	u32 mask = armv8pmu_event_cnten_mask(event);
 
-	if (armv8pmu_event_is_chained(event))
-		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
-
-	kvm_clr_pmu_events(counter_bits);
+	kvm_clr_pmu_events(mask);
 
 	/* We rely on the hypervisor switch code to disable guest counters */
-	if (!kvm_pmu_counter_deferred(attr)) {
-		if (armv8pmu_event_is_chained(event))
-			armv8pmu_disable_counter(idx - 1);
-		armv8pmu_disable_counter(idx);
-	}
+	if (!kvm_pmu_counter_deferred(attr))
+		armv8pmu_disable_counter(mask);
 }
 
-static inline int armv8pmu_enable_intens(int idx)
+static inline void armv8pmu_enable_intens(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmintenset_el1);
-	return idx;
+	write_sysreg(mask, pmintenset_el1);
 }
 
-static inline int armv8pmu_enable_event_irq(struct perf_event *event)
+static inline void armv8pmu_enable_event_irq(struct perf_event *event)
 {
-	return armv8pmu_enable_intens(event->hw.idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	armv8pmu_enable_intens(BIT(counter));
 }
 
-static inline int armv8pmu_disable_intens(int idx)
+static inline void armv8pmu_disable_intens(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmintenclr_el1);
+	write_sysreg(mask, pmintenclr_el1);
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	write_sysreg(BIT(counter), pmovsclr_el0);
+	write_sysreg(mask, pmovsclr_el0);
 	isb();
-
-	return idx;
 }
 
-static inline int armv8pmu_disable_event_irq(struct perf_event *event)
+static inline void armv8pmu_disable_event_irq(struct perf_event *event)
 {
-	return armv8pmu_disable_intens(event->hw.idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	armv8pmu_disable_intens(BIT(counter));
 }
 
 static inline u32 armv8pmu_getreset_flags(void)
@@ -814,14 +802,9 @@ static int armv8pmu_filter_match(struct perf_event *event)
 
 static void armv8pmu_reset(void *info)
 {
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-	u32 idx, nb_cnt = cpu_pmu->num_events;
-
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
-		armv8pmu_disable_counter(idx);
-		armv8pmu_disable_intens(idx);
-	}
+	armv8pmu_disable_counter(U32_MAX);
+	armv8pmu_disable_intens(U32_MAX);
 
 	/* Clear the counters we flip at guest entry/exit */
 	kvm_clr_pmu_events(U32_MAX);
