@@ -127,6 +127,7 @@ struct cn10k_ddr_pmu {
127
127
struct pmu pmu ;
128
128
void __iomem * base ;
129
129
const struct ddr_pmu_platform_data * p_data ;
130
+ const struct ddr_pmu_ops * ops ;
130
131
unsigned int cpu ;
131
132
struct device * dev ;
132
133
int active_events ;
@@ -135,6 +136,16 @@ struct cn10k_ddr_pmu {
135
136
struct hlist_node node ;
136
137
};
137
138
139
/*
 * Per-SoC operations for the DDR PMU.  The free-running read/write byte
 * counters are programmed differently across silicon variants, so the
 * probe path installs the matching implementation (see ddr_pmu_ops below).
 */
struct ddr_pmu_ops {
	/* Enable/disable the free-running read counter. */
	void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
					    bool enable);
	/* Enable/disable the free-running write counter. */
	void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
					     bool enable);
	/* Reset the free-running read counter to zero. */
	void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
	/* Reset the free-running write counter to zero. */
	void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
	/* Recover counter @evt_idx after it saturates at counter_max_val. */
	void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
};
148
+
138
149
#define to_cn10k_ddr_pmu (p ) container_of(p, struct cn10k_ddr_pmu, pmu)
139
150
140
151
struct ddr_pmu_platform_data {
@@ -375,6 +386,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
375
386
int counter , bool enable )
376
387
{
377
388
const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
389
+ const struct ddr_pmu_ops * ops = pmu -> ops ;
378
390
u32 reg ;
379
391
u64 val ;
380
392
@@ -394,21 +406,10 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
394
406
395
407
writeq_relaxed (val , pmu -> base + reg );
396
408
} else {
397
- val = readq_relaxed (pmu -> base +
398
- p_data -> cnt_freerun_en );
399
- if (enable ) {
400
- if (counter == DDRC_PERF_READ_COUNTER_IDX )
401
- val |= DDRC_PERF_FREERUN_READ_EN ;
402
- else
403
- val |= DDRC_PERF_FREERUN_WRITE_EN ;
404
- } else {
405
- if (counter == DDRC_PERF_READ_COUNTER_IDX )
406
- val &= ~DDRC_PERF_FREERUN_READ_EN ;
407
- else
408
- val &= ~DDRC_PERF_FREERUN_WRITE_EN ;
409
- }
410
- writeq_relaxed (val , pmu -> base +
411
- p_data -> cnt_freerun_en );
409
+ if (counter == DDRC_PERF_READ_COUNTER_IDX )
410
+ ops -> enable_read_freerun_counter (pmu , enable );
411
+ else
412
+ ops -> enable_write_freerun_counter (pmu , enable );
412
413
}
413
414
}
414
415
@@ -464,6 +465,7 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
464
465
{
465
466
struct cn10k_ddr_pmu * pmu = to_cn10k_ddr_pmu (event -> pmu );
466
467
const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
468
+ const struct ddr_pmu_ops * ops = pmu -> ops ;
467
469
struct hw_perf_event * hwc = & event -> hw ;
468
470
u8 config = event -> attr .config ;
469
471
int counter , ret ;
@@ -492,11 +494,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
492
494
} else {
493
495
/* fixed event counter, clear counter value */
494
496
if (counter == DDRC_PERF_READ_COUNTER_IDX )
495
- val = DDRC_FREERUN_READ_CNT_CLR ;
497
+ ops -> clear_read_freerun_counter ( pmu ) ;
496
498
else
497
- val = DDRC_FREERUN_WRITE_CNT_CLR ;
498
-
499
- writeq_relaxed (val , pmu -> base + p_data -> cnt_freerun_ctrl );
499
+ ops -> clear_write_freerun_counter (pmu );
500
500
}
501
501
502
502
hwc -> state |= PERF_HES_STOPPED ;
@@ -578,9 +578,63 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
578
578
}
579
579
}
580
580
581
+ static void ddr_pmu_enable_read_freerun (struct cn10k_ddr_pmu * pmu , bool enable )
582
+ {
583
+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
584
+ u64 val ;
585
+
586
+ val = readq_relaxed (pmu -> base + p_data -> cnt_freerun_en );
587
+ if (enable )
588
+ val |= DDRC_PERF_FREERUN_READ_EN ;
589
+ else
590
+ val &= ~DDRC_PERF_FREERUN_READ_EN ;
591
+
592
+ writeq_relaxed (val , pmu -> base + p_data -> cnt_freerun_en );
593
+ }
594
+
595
+ static void ddr_pmu_enable_write_freerun (struct cn10k_ddr_pmu * pmu , bool enable )
596
+ {
597
+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
598
+ u64 val ;
599
+
600
+ val = readq_relaxed (pmu -> base + p_data -> cnt_freerun_en );
601
+ if (enable )
602
+ val |= DDRC_PERF_FREERUN_WRITE_EN ;
603
+ else
604
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN ;
605
+
606
+ writeq_relaxed (val , pmu -> base + p_data -> cnt_freerun_en );
607
+ }
608
+
609
+ static void ddr_pmu_read_clear_freerun (struct cn10k_ddr_pmu * pmu )
610
+ {
611
+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
612
+ u64 val ;
613
+
614
+ val = DDRC_FREERUN_READ_CNT_CLR ;
615
+ writeq_relaxed (val , pmu -> base + p_data -> cnt_freerun_ctrl );
616
+ }
617
+
618
+ static void ddr_pmu_write_clear_freerun (struct cn10k_ddr_pmu * pmu )
619
+ {
620
+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
621
+ u64 val ;
622
+
623
+ val = DDRC_FREERUN_WRITE_CNT_CLR ;
624
+ writeq_relaxed (val , pmu -> base + p_data -> cnt_freerun_ctrl );
625
+ }
626
+
627
/*
 * Recover after a counter saturates at counter_max_val: fold the current
 * hardware counts into every active event, then bounce the PMU (disable +
 * enable) so counting restarts from zero.
 *
 * NOTE(review): @evt_idx is unused here; the parameter exists to satisfy
 * the ops->pmu_overflow_handler signature, presumably for variants that
 * recover a single counter — confirm against other implementations.
 * NOTE(review): "hander" is a typo for "handler", but renaming requires a
 * matching change in the ddr_pmu_ops initializer.
 */
static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
	cn10k_ddr_perf_event_update_all(pmu);
	cn10k_ddr_perf_pmu_disable(&pmu->pmu);
	cn10k_ddr_perf_pmu_enable(&pmu->pmu);
}
633
+
581
634
static irqreturn_t cn10k_ddr_pmu_overflow_handler (struct cn10k_ddr_pmu * pmu )
582
635
{
583
636
const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
637
+ const struct ddr_pmu_ops * ops = pmu -> ops ;
584
638
struct perf_event * event ;
585
639
struct hw_perf_event * hwc ;
586
640
u64 prev_count , new_count ;
@@ -620,9 +674,7 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
620
674
value = cn10k_ddr_perf_read_counter (pmu , i );
621
675
if (value == p_data -> counter_max_val ) {
622
676
pr_info ("Counter-(%d) reached max value\n" , i );
623
- cn10k_ddr_perf_event_update_all (pmu );
624
- cn10k_ddr_perf_pmu_disable (& pmu -> pmu );
625
- cn10k_ddr_perf_pmu_enable (& pmu -> pmu );
677
+ ops -> pmu_overflow_handler (pmu , i );
626
678
}
627
679
}
628
680
@@ -661,6 +713,14 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
661
713
return 0 ;
662
714
}
663
715
716
/*
 * Ops implementation installed by the probe path for CN10K parts
 * (pmu->ops = &ddr_pmu_ops when is_cn10k is set).
 */
static const struct ddr_pmu_ops ddr_pmu_ops = {
	.enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
	.enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
	.clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
	.clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
	/* Name spelling ("hander") matches the function definition above. */
	.pmu_overflow_handler = ddr_pmu_overflow_hander,
};
723
+
664
724
#if defined(CONFIG_ACPI ) || defined(CONFIG_OF )
665
725
static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
666
726
.counter_overflow_val = BIT_ULL (48 ),
@@ -713,6 +773,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
713
773
is_cn10k = ddr_pmu -> p_data -> is_cn10k ;
714
774
715
775
if (is_cn10k ) {
776
+ ddr_pmu -> ops = & ddr_pmu_ops ;
716
777
/* Setup the PMU counter to work in manual mode */
717
778
writeq_relaxed (OP_MODE_CTRL_VAL_MANUAL , ddr_pmu -> base +
718
779
ddr_pmu -> p_data -> cnt_op_mode_ctrl );
0 commit comments