Skip to content

Commit 0045de7

Browse files
Gowthami Thiagarajan authored and Will Deacon committed
perf/marvell: Refactor to extract PMU operations
Introduce a refactor to the Marvell DDR PMU driver to extract PMU operations ("pmu ops") from the existing driver.

Reviewed-by: Jonathan Cameron <[email protected]>
Signed-off-by: Gowthami Thiagarajan <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
1 parent 349f77e commit 0045de7

File tree

1 file changed

+83
-22
lines changed

1 file changed

+83
-22
lines changed

drivers/perf/marvell_cn10k_ddr_pmu.c

Lines changed: 83 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,7 @@ struct cn10k_ddr_pmu {
127127
struct pmu pmu;
128128
void __iomem *base;
129129
const struct ddr_pmu_platform_data *p_data;
130+
const struct ddr_pmu_ops *ops;
130131
unsigned int cpu;
131132
struct device *dev;
132133
int active_events;
@@ -135,6 +136,16 @@ struct cn10k_ddr_pmu {
135136
struct hlist_node node;
136137
};
137138

139+
/*
 * Hardware-variant PMU operations, extracted from the driver core so that
 * other Marvell DDR PMU flavours can supply their own implementations
 * (presumably the motivation for this refactor — confirm against later
 * patches in the series).
 */
struct ddr_pmu_ops {
	/* Set/clear the enable bit of the free-running read counter */
	void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
					    bool enable);
	/* Set/clear the enable bit of the free-running write counter */
	void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
					     bool enable);
	/* Reset the free-running read counter */
	void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
	/* Reset the free-running write counter */
	void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
	/* React to counter evt_idx having reached its maximum value */
	void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
};
148+
138149
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
139150

140151
struct ddr_pmu_platform_data {
@@ -375,6 +386,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
375386
int counter, bool enable)
376387
{
377388
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
389+
const struct ddr_pmu_ops *ops = pmu->ops;
378390
u32 reg;
379391
u64 val;
380392

@@ -394,21 +406,10 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
394406

395407
writeq_relaxed(val, pmu->base + reg);
396408
} else {
397-
val = readq_relaxed(pmu->base +
398-
p_data->cnt_freerun_en);
399-
if (enable) {
400-
if (counter == DDRC_PERF_READ_COUNTER_IDX)
401-
val |= DDRC_PERF_FREERUN_READ_EN;
402-
else
403-
val |= DDRC_PERF_FREERUN_WRITE_EN;
404-
} else {
405-
if (counter == DDRC_PERF_READ_COUNTER_IDX)
406-
val &= ~DDRC_PERF_FREERUN_READ_EN;
407-
else
408-
val &= ~DDRC_PERF_FREERUN_WRITE_EN;
409-
}
410-
writeq_relaxed(val, pmu->base +
411-
p_data->cnt_freerun_en);
409+
if (counter == DDRC_PERF_READ_COUNTER_IDX)
410+
ops->enable_read_freerun_counter(pmu, enable);
411+
else
412+
ops->enable_write_freerun_counter(pmu, enable);
412413
}
413414
}
414415

@@ -464,6 +465,7 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
464465
{
465466
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
466467
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
468+
const struct ddr_pmu_ops *ops = pmu->ops;
467469
struct hw_perf_event *hwc = &event->hw;
468470
u8 config = event->attr.config;
469471
int counter, ret;
@@ -492,11 +494,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
492494
} else {
493495
/* fixed event counter, clear counter value */
494496
if (counter == DDRC_PERF_READ_COUNTER_IDX)
495-
val = DDRC_FREERUN_READ_CNT_CLR;
497+
ops->clear_read_freerun_counter(pmu);
496498
else
497-
val = DDRC_FREERUN_WRITE_CNT_CLR;
498-
499-
writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
499+
ops->clear_write_freerun_counter(pmu);
500500
}
501501

502502
hwc->state |= PERF_HES_STOPPED;
@@ -578,9 +578,63 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
578578
}
579579
}
580580

581+
/*
 * Read-modify-write the free-run enable register to turn the
 * free-running read counter on or off.
 */
static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
{
	void __iomem *en_reg = pmu->base + pmu->p_data->cnt_freerun_en;
	u64 ctrl = readq_relaxed(en_reg);

	if (enable)
		ctrl |= DDRC_PERF_FREERUN_READ_EN;
	else
		ctrl &= ~DDRC_PERF_FREERUN_READ_EN;

	writeq_relaxed(ctrl, en_reg);
}
594+
595+
/*
 * Read-modify-write the free-run enable register to turn the
 * free-running write counter on or off.
 */
static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
{
	void __iomem *en_reg = pmu->base + pmu->p_data->cnt_freerun_en;
	u64 ctrl = readq_relaxed(en_reg);

	if (enable)
		ctrl |= DDRC_PERF_FREERUN_WRITE_EN;
	else
		ctrl &= ~DDRC_PERF_FREERUN_WRITE_EN;

	writeq_relaxed(ctrl, en_reg);
}
608+
609+
static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
610+
{
611+
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
612+
u64 val;
613+
614+
val = DDRC_FREERUN_READ_CNT_CLR;
615+
writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
616+
}
617+
618+
static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
619+
{
620+
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
621+
u64 val;
622+
623+
val = DDRC_FREERUN_WRITE_CNT_CLR;
624+
writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
625+
}
626+
627+
/*
 * Called when a counter has reached its maximum value: flush the counts of
 * all active events, then disable and re-enable the PMU.
 *
 * NOTE(review): "hander" looks like a typo for "handler"; renaming it would
 * also require updating the ddr_pmu_ops initializer, so it is left as-is
 * here.
 */
static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
	/* evt_idx is unused here; the parameter exists to match the
	 * pmu_overflow_handler callback signature in struct ddr_pmu_ops.
	 */
	cn10k_ddr_perf_event_update_all(pmu);
	cn10k_ddr_perf_pmu_disable(&pmu->pmu);
	cn10k_ddr_perf_pmu_enable(&pmu->pmu);
}
633+
581634
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
582635
{
583636
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
637+
const struct ddr_pmu_ops *ops = pmu->ops;
584638
struct perf_event *event;
585639
struct hw_perf_event *hwc;
586640
u64 prev_count, new_count;
@@ -620,9 +674,7 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
620674
value = cn10k_ddr_perf_read_counter(pmu, i);
621675
if (value == p_data->counter_max_val) {
622676
pr_info("Counter-(%d) reached max value\n", i);
623-
cn10k_ddr_perf_event_update_all(pmu);
624-
cn10k_ddr_perf_pmu_disable(&pmu->pmu);
625-
cn10k_ddr_perf_pmu_enable(&pmu->pmu);
677+
ops->pmu_overflow_handler(pmu, i);
626678
}
627679
}
628680

@@ -661,6 +713,14 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
661713
return 0;
662714
}
663715

716+
/*
 * PMU operations for the CN10K DDR controller; installed by the probe
 * routine when p_data->is_cn10k is set.
 */
static const struct ddr_pmu_ops ddr_pmu_ops = {
	.enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
	.enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
	.clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
	.clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
	.pmu_overflow_handler = ddr_pmu_overflow_hander,
};
723+
664724
#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
665725
static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
666726
.counter_overflow_val = BIT_ULL(48),
@@ -713,6 +773,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
713773
is_cn10k = ddr_pmu->p_data->is_cn10k;
714774

715775
if (is_cn10k) {
776+
ddr_pmu->ops = &ddr_pmu_ops;
716777
/* Setup the PMU counter to work in manual mode */
717778
writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
718779
ddr_pmu->p_data->cnt_op_mode_ctrl);

0 commit comments

Comments
 (0)