@@ -189,53 +189,57 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-static inline int get_possible_num_counters(void)
+static inline u64 get_possible_counter_mask(void)
 {
-	int i, num_counters = x86_pmu.num_counters;
+	u64 cntr_mask = x86_pmu.cntr_mask64;
+	int i;
 
 	if (!is_hybrid())
-		return num_counters;
+		return cntr_mask;
 
 	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
-		num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);
+		cntr_mask |= x86_pmu.hybrid_pmu[i].cntr_mask64;
 
-	return num_counters;
+	return cntr_mask;
 }
 
 static bool reserve_pmc_hardware(void)
 {
-	int i, num_counters = get_possible_num_counters();
+	u64 cntr_mask = get_possible_counter_mask();
+	int i, end;
 
-	for (i = 0; i < num_counters; i++) {
+	for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
 		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 			goto perfctr_fail;
 	}
 
-	for (i = 0; i < num_counters; i++) {
+	for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
 		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 			goto eventsel_fail;
 	}
 
 	return true;
 
 eventsel_fail:
-	for (i--; i >= 0; i--)
+	end = i;
+	for_each_set_bit(i, (unsigned long *)&cntr_mask, end)
 		release_evntsel_nmi(x86_pmu_config_addr(i));
-
-	i = num_counters;
+	i = X86_PMC_IDX_MAX;
 
 perfctr_fail:
-	for (i--; i >= 0; i--)
+	end = i;
+	for_each_set_bit(i, (unsigned long *)&cntr_mask, end)
 		release_perfctr_nmi(x86_pmu_event_addr(i));
 
 	return false;
 }
 
 static void release_pmc_hardware(void)
 {
-	int i, num_counters = get_possible_num_counters();
+	u64 cntr_mask = get_possible_counter_mask();
+	int i;
 
-	for (i = 0; i < num_counters; i++) {
+	for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
 		release_perfctr_nmi(x86_pmu_event_addr(i));
 		release_evntsel_nmi(x86_pmu_config_addr(i));
 	}
@@ -248,7 +252,8 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
-bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
+bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
+		     unsigned long *fixed_cntr_mask)
 {
 	u64 val, val_fail = -1, val_new = ~0;
 	int i, reg, reg_fail = -1, ret = 0;
@@ -259,7 +264,7 @@ bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
 	 * Check to see if the BIOS enabled any of the counters, if so
 	 * complain and bail.
 	 */
-	for (i = 0; i < num_counters; i++) {
+	for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) {
 		reg = x86_pmu_config_addr(i);
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
@@ -273,12 +278,12 @@ bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
 		}
 	}
 
-	if (num_counters_fixed) {
+	if (*(u64 *)fixed_cntr_mask) {
 		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
-		for (i = 0; i < num_counters_fixed; i++) {
+		for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) {
 			if (fixed_counter_disabled(i, pmu))
 				continue;
 			if (val & (0x03ULL << i * 4)) {
@@ -679,7 +684,7 @@ void x86_pmu_disable_all(void)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 		u64 val;
 
@@ -736,7 +741,7 @@ void x86_pmu_enable_all(int added)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
 		if (!test_bit(idx, cpuc->active_mask))
@@ -975,7 +980,6 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
 
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
-	int num_counters = hybrid(cpuc->pmu, num_counters);
 	struct event_constraint *c;
 	struct perf_event *e;
 	int n0, i, wmin, wmax, unsched = 0;
@@ -1051,7 +1055,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 
 	/* slow path */
 	if (i != n) {
-		int gpmax = num_counters;
+		int gpmax = x86_pmu_max_num_counters(cpuc->pmu);
 
 		/*
 		 * Do not allow scheduling of more than half the available
@@ -1072,7 +1076,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		 * the extra Merge events needed by large increment events.
 		 */
 		if (x86_pmu.flags & PMU_FL_PAIR) {
-			gpmax = num_counters - cpuc->n_pair;
+			gpmax -= cpuc->n_pair;
 			WARN_ON(gpmax <= 0);
 		}
 
@@ -1157,12 +1161,10 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
  */
 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
 {
-	int num_counters = hybrid(cpuc->pmu, num_counters);
-	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	struct perf_event *event;
 	int n, max_count;
 
-	max_count = num_counters + num_counters_fixed;
+	max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu);
 
 	/* current number of events already accepted */
 	n = cpuc->n_events;
@@ -1522,13 +1524,13 @@ void perf_event_print_debug(void)
 	u64 pebs, debugctl;
 	int cpu = smp_processor_id();
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	int num_counters = hybrid(cpuc->pmu, num_counters);
-	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+	unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+	unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
 	struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
 	unsigned long flags;
 	int idx;
 
-	if (!num_counters)
+	if (!*(u64 *)cntr_mask)
 		return;
 
 	local_irq_save(flags);
@@ -1555,7 +1557,7 @@ void perf_event_print_debug(void)
 	}
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
-	for (idx = 0; idx < num_counters; idx++) {
+	for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) {
 		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
 		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
@@ -1568,7 +1570,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
 			cpu, idx, prev_left);
 	}
-	for (idx = 0; idx < num_counters_fixed; idx++) {
+	for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
 		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
@@ -1682,7 +1684,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
@@ -2038,18 +2040,15 @@ static void _x86_pmu_read(struct perf_event *event)
 	static_call(x86_pmu_update)(event);
 }
 
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
-			  u64 intel_ctrl)
+void x86_pmu_show_pmu_cap(struct pmu *pmu)
 {
 	pr_info("... version: %d\n", x86_pmu.version);
 	pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
-	pr_info("... generic registers: %d\n", num_counters);
+	pr_info("... generic registers: %d\n", x86_pmu_num_counters(pmu));
 	pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events: %lu\n",
-			hweight64((((1ULL << num_counters_fixed) - 1)
-				<< INTEL_PMC_IDX_FIXED) & intel_ctrl));
-	pr_info("... event mask: %016Lx\n", intel_ctrl);
+	pr_info("... fixed-purpose events: %d\n", x86_pmu_num_counters_fixed(pmu));
+	pr_info("... event mask: %016Lx\n", hybrid(pmu, intel_ctrl));
 }
 
 static int __init init_hw_perf_events(void)
@@ -2086,7 +2085,7 @@ static int __init init_hw_perf_events(void)
 	pmu_check_apic();
 
 	/* sanity check that the hardware exists or is emulated */
-	if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed))
+	if (!check_hw_exists(&pmu, x86_pmu.cntr_mask, x86_pmu.fixed_cntr_mask))
 		goto out_bad_pmu;
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
@@ -2097,14 +2096,14 @@ static int __init init_hw_perf_events(void)
 		quirk->func();
 
 	if (!x86_pmu.intel_ctrl)
-		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+		x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
 
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
 	unconstrained = (struct event_constraint)
-		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
-				   0, x86_pmu.num_counters, 0, 0);
+		__EVENT_CONSTRAINT(0, x86_pmu.cntr_mask64,
+				   0, x86_pmu_num_counters(NULL), 0, 0);
 
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
@@ -2113,11 +2112,8 @@ static int __init init_hw_perf_events(void)
 
 	pmu.attr_update = x86_pmu.attr_update;
 
-	if (!is_hybrid()) {
-		x86_pmu_show_pmu_cap(x86_pmu.num_counters,
-				     x86_pmu.num_counters_fixed,
-				     x86_pmu.intel_ctrl);
-	}
+	if (!is_hybrid())
+		x86_pmu_show_pmu_cap(NULL);
 
 	if (!x86_pmu.read)
 		x86_pmu.read = _x86_pmu_read;
@@ -2481,7 +2477,7 @@ void perf_clear_dirty_counters(void)
 	for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
 		if (i >= INTEL_PMC_IDX_FIXED) {
 			/* Metrics and fake events don't have corresponding HW counters. */
-			if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
+			if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
 				continue;
 
 			wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
@@ -2983,8 +2979,8 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 	 * base PMU holds the correct number of counters for P-cores.
 	 */
 	cap->version = x86_pmu.version;
-	cap->num_counters_gp = x86_pmu.num_counters;
-	cap->num_counters_fixed = x86_pmu.num_counters_fixed;
+	cap->num_counters_gp = x86_pmu_num_counters(NULL);
+	cap->num_counters_fixed = x86_pmu_num_counters_fixed(NULL);
 	cap->bit_width_gp = x86_pmu.cntval_bits;
 	cap->bit_width_fixed = x86_pmu.cntval_bits;
 	cap->events_mask = (unsigned int)x86_pmu.events_maskl;
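
Note: the x86_pmu_num_counters(), x86_pmu_num_counters_fixed() and x86_pmu_max_num_counters() helpers called in these hunks are introduced elsewhere in the same series (in arch/x86/events/perf_event.h) and are not visible here. Below is a minimal sketch of what they are assumed to compute from the new counter masks; the exact bodies and the fixed_cntr_mask64 field name are assumptions inferred from how the helpers are used above, not part of this diff.

/* Sketch only: assumed helper definitions, not part of this patch hunk. */
static inline int x86_pmu_num_counters(struct pmu *pmu)
{
	/* Count of general-purpose counters = set bits in the (hybrid-aware) mask. */
	return hweight64(hybrid(pmu, cntr_mask64));
}

static inline int x86_pmu_max_num_counters(struct pmu *pmu)
{
	/* Highest usable counter index + 1; the mask may be sparse on hybrid parts. */
	return fls64(hybrid(pmu, cntr_mask64));
}

static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
{
	/* Count of fixed-function counters from the fixed counter mask. */
	return hweight64(hybrid(pmu, fixed_cntr_mask64));
}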