@@ -369,7 +369,7 @@ static void intel_pstate_set_itmt_prio(int cpu)
 	}
 }
 
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static int intel_pstate_get_cppc_guaranteed(int cpu)
 {
 	struct cppc_perf_caps cppc_perf;
 	int ret;
@@ -385,7 +385,7 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_set_itmt_prio(int cpu)
+static inline void intel_pstate_set_itmt_prio(int cpu)
 {
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
@@ -470,6 +470,20 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 
 	acpi_processor_unregister_performance(policy->cpu);
 }
+
+static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
+{
+	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
+}
+
+static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
+					struct cppc_perf_caps *caps)
+{
+	if (cppc_get_perf_caps(cpu->cpu, caps))
+		return false;
+
+	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
+}
 #else /* CONFIG_ACPI */
 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
@@ -486,26 +500,12 @@ static inline bool intel_pstate_acpi_pm_profile_server(void)
 #endif /* CONFIG_ACPI */
 
 #ifndef CONFIG_ACPI_CPPC_LIB
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static inline int intel_pstate_get_cppc_guaranteed(int cpu)
 {
 	return -ENOTSUPP;
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
 
-static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
-{
-	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
-}
-
-static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
-					struct cppc_perf_caps *caps)
-{
-	if (cppc_get_perf_caps(cpu->cpu, caps))
-		return false;
-
-	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
-}
-
 static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
 {
 	pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
@@ -530,7 +530,6 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
  */
 static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
 {
-	struct cppc_perf_caps caps;
 	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
 	int perf_ctl_turbo = pstate_funcs.get_turbo();
@@ -548,33 +547,39 @@ static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
 	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
 	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
 
-	if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
-		if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
-			pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
-
-			/*
-			 * If the CPPC nominal performance is valid, it can be
-			 * assumed to correspond to cpu_khz.
-			 */
-			if (caps.nominal_perf == perf_ctl_max_phys) {
-				intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
-				return;
-			}
-			scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
-		} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
-			pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
-
-			/*
-			 * If the CPPC guaranteed performance is valid, it can
-			 * be assumed to correspond to max_freq.
-			 */
-			if (caps.guaranteed_perf == perf_ctl_max) {
-				intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
-				return;
+#ifdef CONFIG_ACPI
+	if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
+		struct cppc_perf_caps caps;
+
+		if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
+			if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
+				pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
+
+				/*
+				 * If the CPPC nominal performance is valid, it
+				 * can be assumed to correspond to cpu_khz.
+				 */
+				if (caps.nominal_perf == perf_ctl_max_phys) {
+					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+					return;
+				}
+				scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
+			} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
+				pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
+
+				/*
+				 * If the CPPC guaranteed performance is valid,
+				 * it can be assumed to correspond to max_freq.
+				 */
+				if (caps.guaranteed_perf == perf_ctl_max) {
+					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+					return;
+				}
+				scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
 			}
-			scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
 		}
 	}
+#endif
 	/*
 	 * If using the CPPC data to compute the HWP-to-frequency scaling factor
 	 * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
@@ -944,7 +949,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int ratio, freq;
 
-	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
+	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
 	if (ratio <= 0) {
 		u64 cap;
 