@@ -285,6 +285,17 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
 #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
         (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
+
+/*
+ * We unconditionally enable ARMv8.5-PMU long event counter support
+ * (64-bit events) where supported. Indicate if this arm_pmu has long
+ * event counter support.
+ */
+static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
+{
+        return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
+}
+
 /*
  * We must chain two programmable counters for 64 bit events,
  * except when we have allocated the 64bit cycle counter (for CPU
@@ -294,9 +305,11 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
 static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 {
         int idx = event->hw.idx;
+        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 
         return !WARN_ON(idx < 0) &&
                armv8pmu_event_is_64bit(event) &&
+               !armv8pmu_has_long_event(cpu_pmu) &&
                (idx != ARMV8_IDX_CYCLE_COUNTER);
 }
 
@@ -345,7 +358,7 @@ static inline void armv8pmu_select_counter(int idx)
         isb();
 }
 
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
         armv8pmu_select_counter(idx);
         return read_sysreg(pmxevcntr_el0);
@@ -362,6 +375,44 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
         return val;
 }
 
+/*
+ * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
+ * is set the event counters also become 64-bit counters. Unless the
+ * user has requested a long counter (attr.config1) then we want to
+ * interrupt upon 32-bit overflow - we achieve this by applying a bias.
+ */
+static bool armv8pmu_event_needs_bias(struct perf_event *event)
+{
+        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+        struct hw_perf_event *hwc = &event->hw;
+        int idx = hwc->idx;
+
+        if (armv8pmu_event_is_64bit(event))
+                return false;
+
+        if (armv8pmu_has_long_event(cpu_pmu) ||
+            idx == ARMV8_IDX_CYCLE_COUNTER)
+                return true;
+
+        return false;
+}
+
+static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
+{
+        if (armv8pmu_event_needs_bias(event))
+                value |= GENMASK(63, 32);
+
+        return value;
+}
+
+static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
+{
+        if (armv8pmu_event_needs_bias(event))
+                value &= ~GENMASK(63, 32);
+
+        return value;
+}
+
 static u64 armv8pmu_read_counter(struct perf_event *event)
 {
         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
@@ -377,10 +428,10 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
         else
                 value = armv8pmu_read_hw_counter(event);
 
-        return value;
+        return armv8pmu_unbias_long_counter(event, value);
 }
 
-static inline void armv8pmu_write_evcntr(int idx, u32 value)
+static inline void armv8pmu_write_evcntr(int idx, u64 value)
 {
         armv8pmu_select_counter(idx);
         write_sysreg(value, pmxevcntr_el0);
@@ -405,20 +456,14 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
 
+        value = armv8pmu_bias_long_counter(event, value);
+
         if (!armv8pmu_counter_valid(cpu_pmu, idx))
                 pr_err("CPU%u writing wrong counter %d\n",
                         smp_processor_id(), idx);
-        else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
-                /*
-                 * The cycles counter is really a 64-bit counter.
-                 * When treating it as a 32-bit counter, we only count
-                 * the lower 32 bits, and set the upper 32-bits so that
-                 * we get an interrupt upon 32-bit overflow.
-                 */
-                if (!armv8pmu_event_is_64bit(event))
-                        value |= 0xffffffff00000000ULL;
+        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                 write_sysreg(value, pmccntr_el0);
-        } else
+        else
                 armv8pmu_write_hw_counter(event, value);
 }
 
@@ -731,7 +776,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
         /*
          * Otherwise use events counters
          */
-        if (armv8pmu_event_is_64bit(event))
+        if (armv8pmu_event_is_64bit(event) &&
+            !armv8pmu_has_long_event(cpu_pmu))
                 return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
         else
                 return armv8pmu_get_single_idx(cpuc, cpu_pmu);
@@ -802,6 +848,9 @@ static int armv8pmu_filter_match(struct perf_event *event)
 
 static void armv8pmu_reset(void *info)
 {
+        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+        u32 pmcr;
+
         /* The counter and interrupt enable registers are unknown at reset. */
         armv8pmu_disable_counter(U32_MAX);
         armv8pmu_disable_intens(U32_MAX);
@@ -813,8 +862,13 @@ static void armv8pmu_reset(void *info)
          * Initialize & Reset PMNC. Request overflow interrupt for
          * 64 bit cycle counter but cheat in armv8pmu_write_counter().
          */
-        armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
-                            ARMV8_PMU_PMCR_LC);
+        pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
+
+        /* Enable long event counter support where available */
+        if (armv8pmu_has_long_event(cpu_pmu))
+                pmcr |= ARMV8_PMU_PMCR_LP;
+
+        armv8pmu_pmcr_write(pmcr);
 }
 
 static int __armv8_pmuv3_map_event(struct perf_event *event,
@@ -897,6 +951,7 @@ static void __armv8pmu_probe_pmu(void *info)
         if (pmuver == 0xf || pmuver == 0)
                 return;
 
+        cpu_pmu->pmuver = pmuver;
         probe->present = true;
 
         /* Read the nb of CNTx counters supported from PMNC */
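
A note on the bias trick this diff introduces: a counter whose upper 32 bits are preset to all-ones overflows the full 64-bit register, and therefore raises the overflow interrupt, exactly when its low 32 bits wrap. The following minimal user-space sketch illustrates that arithmetic; GENMASK_63_32, bias() and unbias() are hypothetical stand-ins for the kernel's GENMASK(63, 32) and the armv8pmu_bias_long_counter()/armv8pmu_unbias_long_counter() helpers added above, not kernel API.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's GENMASK(63, 32): bits 63..32 set. */
#define GENMASK_63_32 (~0ULL << 32)

/*
 * Bias a programmed value the way armv8pmu_bias_long_counter() does
 * for a 32-bit event on a 64-bit hardware counter: force the upper 32
 * bits to all-ones so the 64-bit counter wraps, and would raise its
 * overflow interrupt, exactly when the low 32 bits wrap.
 */
static uint64_t bias(uint64_t value, bool needs_bias)
{
        return needs_bias ? (value | GENMASK_63_32) : value;
}

/* Mask the bias back off on read, as armv8pmu_unbias_long_counter() does. */
static uint64_t unbias(uint64_t value, bool needs_bias)
{
        return needs_bias ? (value & ~GENMASK_63_32) : value;
}

int main(void)
{
        /* Program a sample period of 16 events before overflow. */
        uint64_t counter = bias(0xfffffff0u, true);

        /* 16 increments later the 64-bit counter wraps to zero... */
        counter += 16;
        assert(counter == 0);

        /* ...and a read recovers the unbiased 32-bit view of the count. */
        printf("read back: %llu\n",
               (unsigned long long)unbias(counter, true));
        return 0;
}

This is the same cheat the cycle counter already played via the hard-coded 0xffffffff00000000ULL removed from armv8pmu_write_counter(); the patch centralizes it in the bias helpers so that, once ARMV8_PMU_PMCR_LP makes the ordinary event counters 64 bits wide, they can use it too instead of being chained in pairs.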