@@ -158,7 +158,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
 	 * broken BIOS lack of nominal_freq and lowest_freq capabilities
 	 * definition in ACPI tables
 	 */
-	if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+	if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
 		quirks = dmi->driver_data;
 		pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
 		return 1;
@@ -200,7 +200,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
 	u64 epp;
 	int ret;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		if (!cppc_req_cached) {
 			epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
 					    &cppc_req_cached);
@@ -253,7 +253,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
 	int ret;
 	struct cppc_perf_ctrls perf_ctrls;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		u64 value = READ_ONCE(cpudata->cppc_req_cached);
 
 		value &= ~GENMASK_ULL(31, 24);
@@ -752,7 +752,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
 {
 	int ret;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		u64 cap1;
 
 		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
@@ -991,7 +991,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* It will be updated by governor */
 	policy->cur = policy->cpuinfo.min_freq;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC))
+	if (cpu_feature_enabled(X86_FEATURE_CPPC))
 		policy->fast_switch_possible = true;
 
 	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
@@ -1224,7 +1224,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
 
 	cppc_state = mode;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+	if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
 		return 0;
 
 	for_each_present_cpu(cpu) {
@@ -1453,7 +1453,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
 		if (ret)
 			return ret;
@@ -1543,7 +1543,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 		epp = 0;
 
 	/* Set initial EPP value */
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		value &= ~GENMASK_ULL(31, 24);
 		value |= (u64)epp << 24;
 	}
@@ -1582,7 +1582,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 	value = READ_ONCE(cpudata->cppc_req_cached);
 	max_perf = READ_ONCE(cpudata->highest_perf);
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
 	} else {
 		perf_ctrls.max_perf = max_perf;
@@ -1616,7 +1616,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
 	value = READ_ONCE(cpudata->cppc_req_cached);
 
 	mutex_lock(&amd_pstate_limits_lock);
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
 
 		/* Set max perf same as min perf */
@@ -1819,7 +1819,7 @@ static int __init amd_pstate_init(void)
18191819 */
18201820 if (amd_pstate_acpi_pm_profile_undefined () ||
18211821 amd_pstate_acpi_pm_profile_server () ||
1822- !boot_cpu_has (X86_FEATURE_CPPC )) {
1822+ !cpu_feature_enabled (X86_FEATURE_CPPC )) {
18231823 pr_info ("driver load is disabled, boot with specific mode to enable this\n" );
18241824 return - ENODEV ;
18251825 }
@@ -1838,7 +1838,7 @@ static int __init amd_pstate_init(void)
 	}
 
 	/* capability check */
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
 		pr_debug("AMD CPPC MSR based functionality is supported\n");
 		if (cppc_state != AMD_PSTATE_ACTIVE)
 			current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;