@@ -450,86 +450,74 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
 	}
 }
 
-static inline int armv8pmu_enable_counter(int idx)
+static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmcntenset_el0);
-	return idx;
+	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	u32 mask = BIT(counter);
+
+	if (armv8pmu_event_is_chained(event))
+		mask |= BIT(counter - 1);
+	return mask;
+}
+
+static inline void armv8pmu_enable_counter(u32 mask)
+{
+	write_sysreg(mask, pmcntenset_el0);
 }
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
-	int idx = event->hw.idx;
-	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
-
-	if (armv8pmu_event_is_chained(event))
-		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+	u32 mask = armv8pmu_event_cnten_mask(event);
 
-	kvm_set_pmu_events(counter_bits, attr);
+	kvm_set_pmu_events(mask, attr);
 
 	/* We rely on the hypervisor switch code to enable guest counters */
-	if (!kvm_pmu_counter_deferred(attr)) {
-		armv8pmu_enable_counter(idx);
-		if (armv8pmu_event_is_chained(event))
-			armv8pmu_enable_counter(idx - 1);
-	}
+	if (!kvm_pmu_counter_deferred(attr))
+		armv8pmu_enable_counter(mask);
 }
 
-static inline int armv8pmu_disable_counter(int idx)
+static inline void armv8pmu_disable_counter(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmcntenclr_el0);
-	return idx;
+	write_sysreg(mask, pmcntenclr_el0);
 }
 
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
 	struct perf_event_attr *attr = &event->attr;
-	int idx = hwc->idx;
-	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
+	u32 mask = armv8pmu_event_cnten_mask(event);
 
-	if (armv8pmu_event_is_chained(event))
-		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
-
-	kvm_clr_pmu_events(counter_bits);
+	kvm_clr_pmu_events(mask);
 
 	/* We rely on the hypervisor switch code to disable guest counters */
-	if (!kvm_pmu_counter_deferred(attr)) {
-		if (armv8pmu_event_is_chained(event))
-			armv8pmu_disable_counter(idx - 1);
-		armv8pmu_disable_counter(idx);
-	}
+	if (!kvm_pmu_counter_deferred(attr))
+		armv8pmu_disable_counter(mask);
 }
 
-static inline int armv8pmu_enable_intens(int idx)
+static inline void armv8pmu_enable_intens(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmintenset_el1);
-	return idx;
+	write_sysreg(mask, pmintenset_el1);
 }
 
-static inline int armv8pmu_enable_event_irq(struct perf_event *event)
+static inline void armv8pmu_enable_event_irq(struct perf_event *event)
 {
-	return armv8pmu_enable_intens(event->hw.idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	armv8pmu_enable_intens(BIT(counter));
 }
 
-static inline int armv8pmu_disable_intens(int idx)
+static inline void armv8pmu_disable_intens(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmintenclr_el1);
+	write_sysreg(mask, pmintenclr_el1);
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	write_sysreg(BIT(counter), pmovsclr_el0);
+	write_sysreg(mask, pmovsclr_el0);
 	isb();
-
-	return idx;
 }
 
-static inline int armv8pmu_disable_event_irq(struct perf_event *event)
+static inline void armv8pmu_disable_event_irq(struct perf_event *event)
 {
-	return armv8pmu_disable_intens(event->hw.idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	armv8pmu_disable_intens(BIT(counter));
 }
 
 static inline u32 armv8pmu_getreset_flags(void)
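This hunk replaces the per-index enable/disable helpers with mask-based ones: armv8pmu_event_cnten_mask() folds an event's counter, plus the adjacent lower counter for a chained event, into one bitmask, so each caller issues a single pmcntenset_el0 or pmcntenclr_el0 write instead of one write per counter. A rough user-space sketch of that mask composition follows; the BIT and index macros below are simplified stand-ins for illustration only, not the kernel's definitions.

/*
 * Stand-alone sketch (not kernel code): models how a chained event's two
 * adjacent counters collapse into a single enable/disable bitmask, in the
 * spirit of armv8pmu_event_cnten_mask() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define IDX_COUNTER0		1	/* assumed: idx 0 is the cycle counter */
#define IDX_TO_COUNTER(idx)	((idx) - IDX_COUNTER0)

static uint32_t cnten_mask(int idx, bool chained)
{
	uint32_t mask = BIT(IDX_TO_COUNTER(idx));

	/* A chained event also owns the counter just below it. */
	if (chained)
		mask |= BIT(IDX_TO_COUNTER(idx) - 1);
	return mask;
}

int main(void)
{
	/* Chained event on idx 4: bits 3 and 2 end up in one mask (0xc). */
	printf("mask = 0x%08x\n", cnten_mask(4, true));
	return 0;
}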
@@ -814,14 +802,9 @@ static int armv8pmu_filter_match(struct perf_event *event)
 
 static void armv8pmu_reset(void *info)
 {
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-	u32 idx, nb_cnt = cpu_pmu->num_events;
-
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
-		armv8pmu_disable_counter(idx);
-		armv8pmu_disable_intens(idx);
-	}
+	armv8pmu_disable_counter(U32_MAX);
+	armv8pmu_disable_intens(U32_MAX);
 
 	/* Clear the counters we flip at guest entry/exit */
 	kvm_clr_pmu_events(U32_MAX);
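The reset hunk leans on the same mask form: with write-1-to-clear registers whose unimplemented bits ignore writes, a single U32_MAX write to each clear register disables every counter and interrupt, replacing the old per-index loop. A rough sketch of the register behaviour this assumes; the register image and implemented-bit mask below are invented for illustration.

/*
 * Stand-alone sketch (not kernel code): models the write-1-to-clear
 * behaviour assumed for PMCNTENCLR_EL0/PMINTENCLR_EL1 in the reset path.
 */
#include <stdint.h>
#include <stdio.h>

#define U32_MAX 0xffffffffu

/* Pretend enable register for a PMU with 6 event counters + cycle counter. */
static uint32_t pmcnten = 0x8000003f;
static const uint32_t implemented = 0x8000003f;	/* other bits ignore writes */

static void pmcntenclr_write(uint32_t mask)
{
	/* Write-1-to-clear, restricted to implemented counters. */
	pmcnten &= ~(mask & implemented);
}

int main(void)
{
	pmcntenclr_write(U32_MAX);		/* clears everything in one write */
	printf("pmcnten = 0x%08x\n", pmcnten);	/* prints 0x00000000 */
	return 0;
}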