@@ -148,7 +148,6 @@ struct rapl_model {
 /* 1/2^hw_unit Joule */
 static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
 static struct rapl_pmus *rapl_pmus;
-static cpumask_t rapl_cpu_mask;
 static unsigned int rapl_cntr_mask;
 static u64 rapl_timer_ms;
 static struct perf_msr *rapl_msrs;
@@ -369,8 +368,6 @@ static int rapl_pmu_event_init(struct perf_event *event)
 	if (event->cpu < 0)
 		return -EINVAL;
 
-	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
-
 	if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
 		return -EINVAL;
 
@@ -389,7 +386,6 @@ static int rapl_pmu_event_init(struct perf_event *event)
 	pmu = cpu_to_rapl_pmu(event->cpu);
 	if (!pmu)
 		return -EINVAL;
-	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = rapl_msrs[bit].msr;
 	event->hw.config = cfg;
@@ -403,23 +399,6 @@ static void rapl_pmu_event_read(struct perf_event *event)
 	rapl_event_update(event);
 }
 
-static ssize_t rapl_get_attr_cpumask(struct device *dev,
-				     struct device_attribute *attr, char *buf)
-{
-	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
-}
-
-static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
-
-static struct attribute *rapl_pmu_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL,
-};
-
-static struct attribute_group rapl_pmu_attr_group = {
-	.attrs = rapl_pmu_attrs,
-};
-
 RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
 RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02");
 RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03");
@@ -467,7 +446,6 @@ static struct attribute_group rapl_pmu_format_group = {
 };
 
 static const struct attribute_group *rapl_attr_groups[] = {
-	&rapl_pmu_attr_group,
 	&rapl_pmu_format_group,
 	&rapl_pmu_events_group,
 	NULL,
@@ -570,54 +548,6 @@ static struct perf_msr amd_rapl_msrs[] = {
 	[PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 },
 };
 
-static int rapl_cpu_offline(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-	int target;
-
-	/* Check if exiting cpu is used for collecting rapl events */
-	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
-		return 0;
-
-	pmu->cpu = -1;
-	/* Find a new cpu to collect rapl events */
-	target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);
-
-	/* Migrate rapl events to the new target */
-	if (target < nr_cpu_ids) {
-		cpumask_set_cpu(target, &rapl_cpu_mask);
-		pmu->cpu = target;
-		perf_pmu_migrate_context(pmu->pmu, cpu, target);
-	}
-	return 0;
-}
-
-static int rapl_cpu_online(unsigned int cpu)
-{
-	s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
-	if (rapl_pmu_idx < 0) {
-		pr_err("topology_logical_(package/die)_id() returned a negative value");
-		return -EINVAL;
-	}
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-	int target;
-
-	if (!pmu)
-		return -ENOMEM;
-
-	/*
-	 * Check if there is an online cpu in the package which collects rapl
-	 * events already.
-	 */
-	target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
-	if (target < nr_cpu_ids)
-		return 0;
-
-	cpumask_set_cpu(cpu, &rapl_cpu_mask);
-	pmu->cpu = cpu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(struct rapl_model *rm)
 {
 	u64 msr_rapl_power_unit_bits;
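Note: the two callbacks deleted in the hunk above are exactly what the scope conversion makes redundant. Once the PMU declares a package/die scope (see the init_rapl_pmus() hunks below), the perf core keeps a reader CPU per scope and migrates events on hotplug itself. As a rough, hedged sketch of the bookkeeping being retired, written against hypothetical names (my_pmu, my_scope_mask and active_cpus are illustrative, not identifiers from this patch):

/*
 * Illustrative-only sketch of the per-driver CPU-migration bookkeeping the
 * removed callbacks performed; with a scoped PMU the perf core does the
 * equivalent work generically. All "my_*" names are hypothetical.
 */
static int my_pmu_cpu_offline(unsigned int cpu, struct pmu *my_pmu,
			      const struct cpumask *my_scope_mask,
			      cpumask_t *active_cpus)
{
	unsigned int target;

	/* Nothing to do unless this CPU was the designated reader. */
	if (!cpumask_test_and_clear_cpu(cpu, active_cpus))
		return 0;

	/* Pick any other online CPU in the same package/die ... */
	target = cpumask_any_but(my_scope_mask, cpu);
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, active_cpus);
		/* ... and move the perf context over to it. */
		perf_pmu_migrate_context(my_pmu, cpu, target);
	}
	return 0;
}

Since the core performs this selection and migration for any PMU with a non-default scope, the driver-local copy (and the rapl_cpu_mask it relied on) can be dropped.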
@@ -725,9 +655,12 @@ static int __init init_rapl_pmu(void)
 static int __init init_rapl_pmus(void)
 {
 	int nr_rapl_pmu = topology_max_packages();
+	int rapl_pmu_scope = PERF_PMU_SCOPE_PKG;
 
-	if (!rapl_pmu_is_pkg_scope())
+	if (!rapl_pmu_is_pkg_scope()) {
 		nr_rapl_pmu *= topology_max_dies_per_package();
+		rapl_pmu_scope = PERF_PMU_SCOPE_DIE;
+	}
 
 	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
 	if (!rapl_pmus)
@@ -743,6 +676,7 @@ static int __init init_rapl_pmus(void)
 	rapl_pmus->pmu.start		= rapl_pmu_event_start;
 	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
 	rapl_pmus->pmu.read		= rapl_pmu_event_read;
+	rapl_pmus->pmu.scope		= rapl_pmu_scope;
 	rapl_pmus->pmu.module		= THIS_MODULE;
 	rapl_pmus->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
 
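For context, setting .scope at registration time is what lets the perf core pick and maintain an appropriate CPU per package (or die) for these system-wide events. A minimal, hedged sketch of that registration pattern as a standalone module skeleton follows; every demo_* identifier is hypothetical and the counting callbacks are empty stubs, not the RAPL implementation:

#include <linux/module.h>
#include <linux/perf_event.h>

/* Stub callbacks; a real driver would program and read its counters here. */
static int demo_event_init(struct perf_event *event)
{
	/* Only accept events that target this PMU. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}
static int  demo_event_add(struct perf_event *event, int flags)   { return 0; }
static void demo_event_del(struct perf_event *event, int flags)   { }
static void demo_event_start(struct perf_event *event, int flags) { }
static void demo_event_stop(struct perf_event *event, int flags)  { }
static void demo_event_read(struct perf_event *event)             { }

static struct pmu demo_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* system-wide counters only */
	.scope		= PERF_PMU_SCOPE_PKG,	/* one reader CPU per package */
	.event_init	= demo_event_init,
	.add		= demo_event_add,
	.del		= demo_event_del,
	.start		= demo_event_start,
	.stop		= demo_event_stop,
	.read		= demo_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static int __init demo_init(void)
{
	/* -1: let the core assign a dynamic PMU type id. */
	return perf_pmu_register(&demo_pmu, "demo_pkg_pmu", -1);
}

static void __exit demo_exit(void)
{
	perf_pmu_unregister(&demo_pmu);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A registered PMU appears under /sys/bus/event_source/devices/ as usual; the RAPL PMU registered in this driver is the familiar "power" PMU used by, e.g., perf stat -e power/energy-pkg/.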
@@ -892,24 +826,13 @@ static int __init rapl_pmu_init(void)
 	if (ret)
 		return ret;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 */
-	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
-				"perf/x86/rapl:online",
-				rapl_cpu_online, rapl_cpu_offline);
-	if (ret)
-		goto out;
-
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out1;
+		goto out;
 
 	rapl_advertise();
 	return 0;
 
-out1:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -919,7 +842,6 @@ module_init(rapl_pmu_init);
 
 static void __exit intel_rapl_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }