@@ -34,28 +34,9 @@ static struct attribute_group iommu_pmu_events_attr_group = {
 	.attrs = attrs_empty,
 };
 
-static cpumask_t iommu_pmu_cpu_mask;
-
-static ssize_t
-cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	return cpumap_print_to_pagebuf(true, buf, &iommu_pmu_cpu_mask);
-}
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *iommu_pmu_cpumask_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL
-};
-
-static struct attribute_group iommu_pmu_cpumask_attr_group = {
-	.attrs = iommu_pmu_cpumask_attrs,
-};
-
 static const struct attribute_group *iommu_pmu_attr_groups[] = {
 	&iommu_pmu_format_attr_group,
 	&iommu_pmu_events_attr_group,
-	&iommu_pmu_cpumask_attr_group,
 	NULL
 };
 
@@ -565,6 +546,7 @@ static int __iommu_pmu_register(struct intel_iommu *iommu)
 	iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
 	iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
 	iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+	iommu_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
 	iommu_pmu->pmu.module = THIS_MODULE;
 
 	return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
@@ -773,89 +755,6 @@ static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
 	iommu->perf_irq = 0;
 }
 
-static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
-	struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
-
-	if (cpumask_empty(&iommu_pmu_cpu_mask))
-		cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
-
-	if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask))
-		iommu_pmu->cpu = cpu;
-
-	return 0;
-}
-
-static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
-	struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
-	int target = cpumask_first(&iommu_pmu_cpu_mask);
-
-	/*
-	 * The iommu_pmu_cpu_mask has been updated when offline the CPU
-	 * for the first iommu_pmu. Migrate the other iommu_pmu to the
-	 * new target.
-	 */
-	if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
-		perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
-		iommu_pmu->cpu = target;
-		return 0;
-	}
-
-	if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
-		return 0;
-
-	target = cpumask_any_but(cpu_online_mask, cpu);
-
-	if (target < nr_cpu_ids)
-		cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
-	else
-		return 0;
-
-	perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
-	iommu_pmu->cpu = target;
-
-	return 0;
-}
-
-static int nr_iommu_pmu;
-static enum cpuhp_state iommu_cpuhp_slot;
-
-static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
-{
-	int ret;
-
-	if (!nr_iommu_pmu) {
-		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
-					      "driver/iommu/intel/perfmon:online",
-					      iommu_pmu_cpu_online,
-					      iommu_pmu_cpu_offline);
-		if (ret < 0)
-			return ret;
-		iommu_cpuhp_slot = ret;
-	}
-
-	ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
-	if (ret) {
-		if (!nr_iommu_pmu)
-			cpuhp_remove_multi_state(iommu_cpuhp_slot);
-		return ret;
-	}
-	nr_iommu_pmu++;
-
-	return 0;
-}
-
-static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
-{
-	cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
-
-	if (--nr_iommu_pmu)
-		return;
-
-	cpuhp_remove_multi_state(iommu_cpuhp_slot);
-}
-
 void iommu_pmu_register(struct intel_iommu *iommu)
 {
 	struct iommu_pmu *iommu_pmu = iommu->pmu;
@@ -866,17 +765,12 @@ void iommu_pmu_register(struct intel_iommu *iommu)
 	if (__iommu_pmu_register(iommu))
 		goto err;
 
-	if (iommu_pmu_cpuhp_setup(iommu_pmu))
-		goto unregister;
-
 	/* Set interrupt for overflow */
 	if (iommu_pmu_set_interrupt(iommu))
-		goto cpuhp_free;
+		goto unregister;
 
 	return;
 
-cpuhp_free:
-	iommu_pmu_cpuhp_free(iommu_pmu);
 unregister:
 	perf_pmu_unregister(&iommu_pmu->pmu);
 err:
@@ -892,6 +786,5 @@ void iommu_pmu_unregister(struct intel_iommu *iommu)
 		return;
 
 	iommu_pmu_unset_interrupt(iommu);
-	iommu_pmu_cpuhp_free(iommu_pmu);
 	perf_pmu_unregister(&iommu_pmu->pmu);
 }
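
With the PMU declared as PERF_PMU_SCOPE_SYS_WIDE, event-CPU selection and hotplug migration are left to the generic perf core, which is why the driver-private cpumask attribute and the cpuhp callbacks above can be dropped. The sketch below is not part of this patch; it only illustrates, under that assumption, what scope-based registration looks like for a hypothetical driver ("example_pmu" and its callbacks are placeholders, not names from this file).

/*
 * Minimal sketch, not from this patch: a system-wide PMU that relies on
 * the perf core's scope support instead of a private cpumask/cpuhp
 * implementation. "example_pmu" is a hypothetical placeholder.
 */
#include <linux/module.h>
#include <linux/perf_event.h>

static struct pmu example_pmu = {
	.module		= THIS_MODULE,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	.scope		= PERF_PMU_SCOPE_SYS_WIDE,	/* scope managed by perf core */
	/* .event_init, .add, .del, .start, .stop, .read as usual */
};

static int __init example_pmu_init(void)
{
	/*
	 * No cpuhp_setup_state_multi() call and no cpumask sysfs attribute:
	 * with a declared scope the perf core keeps events on a valid CPU
	 * and migrates them when that CPU goes offline.
	 */
	return perf_pmu_register(&example_pmu, "example_pmu", -1);
}
module_init(example_pmu_init);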