 #define ARM_CSPMU_ACTIVE_CPU_MASK		0x0
 #define ARM_CSPMU_ASSOCIATED_CPU_MASK		0x1
 
-/* Check if field f in flags is set with value v */
-#define CHECK_APMT_FLAG(flags, f, v) \
-	((flags & (ACPI_APMT_FLAGS_ ## f)) == (ACPI_APMT_FLAGS_ ## f ## _ ## v))
-
 /* Check and use default if implementer doesn't provide attribute callback */
 #define CHECK_DEFAULT_IMPL_OPS(ops, callback)			\
 	do {							\
 
 static unsigned long arm_cspmu_cpuhp_state;
 
+static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
+{
+	return *(struct acpi_apmt_node **)dev_get_platdata(dev);
+}
+
 /*
  * In CoreSight PMU architecture, all of the MMIO registers are 32-bit except
  * counter register. The counter register can be implemented as 32-bit or 64-bit
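The new arm_cspmu_apmt_node() helper fetches the APMT node from the device's platform data on demand instead of caching it in struct arm_cspmu. The double dereference reflects how the pointer is handed over: the platform data holds a copy of the pointer to the node, not the node itself. A minimal sketch of the registering side under that assumption (the device name and function below are illustrative, not part of this patch):

#include <linux/acpi.h>
#include <linux/platform_device.h>

/*
 * Hypothetical parent-side registration: platform_device_register_data()
 * copies sizeof(node) bytes starting at &node, i.e. the pointer value
 * itself.  dev_get_platdata() on the child therefore yields a
 * struct acpi_apmt_node **, which arm_cspmu_apmt_node() dereferences once.
 */
static struct platform_device *
apmt_register_pmu_sketch(struct acpi_apmt_node *node, int id)
{
	return platform_device_register_data(NULL, "arm-cs-arch-pmu", id,
					     &node, sizeof(node));
}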
@@ -155,12 +156,6 @@ static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
 	return val;
 }
 
-/* Check if PMU supports 64-bit single copy atomic. */
-static inline bool supports_64bit_atomics(const struct arm_cspmu *cspmu)
-{
-	return CHECK_APMT_FLAG(cspmu->apmt_node->flags, ATOMIC, SUPP);
-}
-
 /* Check if cycle counter is supported. */
 static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
 {
@@ -319,7 +314,7 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
 	static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };
 
 	dev = cspmu->dev;
-	apmt_node = cspmu->apmt_node;
+	apmt_node = arm_cspmu_apmt_node(dev);
 	pmu_type = apmt_node->type;
 
 	if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
@@ -396,8 +391,8 @@ static const struct impl_match impl_match[] = {
 static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
 {
 	int ret;
-	struct acpi_apmt_node *apmt_node = cspmu->apmt_node;
 	struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
+	struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
 	const struct impl_match *match = impl_match;
 
 	/*
@@ -719,7 +714,7 @@ static u64 arm_cspmu_read_counter(struct perf_event *event)
 	offset = counter_offset(sizeof(u64), event->hw.idx);
 	counter_addr = cspmu->base1 + offset;
 
-	return supports_64bit_atomics(cspmu) ?
+	return cspmu->has_atomic_dword ?
 		readq(counter_addr) :
 		read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
 }
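arm_cspmu_read_counter() now tests the cached cspmu->has_atomic_dword flag, set once at allocation time further down, rather than chasing the APMT node on every counter read. When 64-bit single-copy atomic access is absent, the driver falls back to read_reg64_hilohi(); its body is not part of this diff, so the sketch below only illustrates the usual hi-lo-hi scheme such a helper implements (names and structure assumed):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative hi-lo-hi read: sample the upper half, then the lower half,
 * then the upper half again.  If the two upper samples match, the 64-bit
 * value is consistent; otherwise the counter carried mid-read and we retry,
 * bounded by max_poll_count attempts.
 */
static u64 hilohi_read_sketch(const void __iomem *addr, u32 max_poll_count)
{
	u32 hi_old, lo, hi_new;

	hi_new = readl(addr + 4);
	do {
		hi_old = hi_new;
		lo = readl(addr);
		hi_new = readl(addr + 4);
	} while (hi_old != hi_new && max_poll_count--);

	return ((u64)hi_new << 32) | lo;
}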
@@ -910,36 +905,28 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
 {
 	struct acpi_apmt_node *apmt_node;
 	struct arm_cspmu *cspmu;
-	struct device *dev;
-
-	dev = &pdev->dev;
-	apmt_node = *(struct acpi_apmt_node **)dev_get_platdata(dev);
-	if (!apmt_node) {
-		dev_err(dev, "failed to get APMT node\n");
-		return NULL;
-	}
+	struct device *dev = &pdev->dev;
 
 	cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
 	if (!cspmu)
 		return NULL;
 
 	cspmu->dev = dev;
-	cspmu->apmt_node = apmt_node;
-
 	platform_set_drvdata(pdev, cspmu);
 
+	apmt_node = arm_cspmu_apmt_node(dev);
+	cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+
 	return cspmu;
 }
 
 static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
 {
 	struct device *dev;
 	struct platform_device *pdev;
-	struct acpi_apmt_node *apmt_node;
 
 	dev = cspmu->dev;
 	pdev = to_platform_device(dev);
-	apmt_node = cspmu->apmt_node;
 
 	/* Base address for page 0. */
 	cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
@@ -950,7 +937,7 @@ static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
 
 	/* Base address for page 1 if supported. Otherwise point to page 0. */
 	cspmu->base1 = cspmu->base0;
-	if (CHECK_APMT_FLAG(apmt_node->flags, DUAL_PAGE, SUPP)) {
+	if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
 		cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
 		if (IS_ERR(cspmu->base1)) {
 			dev_err(dev, "ioremap failed for page-1 resource\n");
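Page-1 support is now inferred from the presence of a second MEM resource rather than the APMT DUAL_PAGE flag. This presumes that whoever creates the platform device (the APMT table parser in the ACPI case) publishes the page-1 resource only when the hardware implements it. A rough, hypothetical sketch of that parent-side convention (addresses and the has_second_page flag are placeholders):

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/*
 * Hypothetical device creation: add the page-1 resource only when the
 * firmware description says a second register page exists, so the probe
 * path can use platform_get_resource(pdev, IORESOURCE_MEM, 1) as the test.
 */
static int add_cspmu_mem_resources_sketch(struct platform_device *pdev,
					  bool has_second_page)
{
	/* Placeholder addresses; real values come from the firmware table. */
	struct resource res[2] = {
		DEFINE_RES_MEM(0x2a800000, SZ_4K),
		DEFINE_RES_MEM(0x2a801000, SZ_4K),
	};

	return platform_device_add_resources(pdev, res,
					     has_second_page ? 2 : 1);
}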
@@ -1047,19 +1034,14 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
 	int irq, ret;
 	struct device *dev;
 	struct platform_device *pdev;
-	struct acpi_apmt_node *apmt_node;
 
 	dev = cspmu->dev;
 	pdev = to_platform_device(dev);
-	apmt_node = cspmu->apmt_node;
 
 	/* Skip IRQ request if the PMU does not support overflow interrupt. */
-	if (apmt_node->ovflw_irq == 0)
-		return 0;
-
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq < 0)
-		return irq;
+		return irq == -ENXIO ? 0 : irq;
 
 	ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
 			       IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
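Dropping the apmt_node->ovflw_irq check works because platform_get_irq_optional() returns -ENXIO, without logging an error, when the device simply has no interrupt; any other negative value (e.g. -EPROBE_DEFER) is a real failure that must be propagated. The same pattern in isolation, as a sketch (function name assumed):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/*
 * Generic optional-IRQ pattern: -ENXIO means "no IRQ described for this
 * device", which is acceptable here; any other negative value is a genuine
 * error and is returned to the caller unchanged.
 */
static int request_optional_irq_sketch(struct platform_device *pdev,
				       irq_handler_t handler, void *data)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq == -ENXIO)
		return 0;	/* device works without an overflow IRQ */
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, handler,
				IRQF_NOBALANCING | IRQF_NO_THREAD,
				dev_name(&pdev->dev), data);
}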
@@ -1103,13 +1085,11 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
 
 static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
 {
-	struct device *dev;
 	struct acpi_apmt_node *apmt_node;
 	int affinity_flag;
 	int cpu;
 
-	dev = cspmu->pmu.dev;
-	apmt_node = cspmu->apmt_node;
+	apmt_node = arm_cspmu_apmt_node(cspmu->dev);
 	affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;
 
 	if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
@@ -1131,7 +1111,7 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
 	}
 
 	if (cpumask_empty(&cspmu->associated_cpus)) {
-		dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
+		dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
 		return -ENODEV;
 	}
 