Skip to content

Commit 16c977f

Browse files
dhananjay-AMD authored and superm1 committed
cpufreq/amd-pstate: Convert the amd_pstate_get/set_epp() to static calls
MSR and shared memory based systems have different mechanisms to get and set the epp value. Split those mechanisms into different functions and assign them appropriately to the static calls at boot time. This eliminates the need for the "if(cpu_feature_enabled(X86_FEATURE_CPPC))" checks at runtime. Also, propagate the error code from rdmsrl_on_cpu() and cppc_get_epp_perf() to *_get_epp()'s caller, instead of returning -EIO unconditionally. Signed-off-by: Dhananjay Ugwekar <[email protected]> Reviewed-by: Mario Limonciello <[email protected]> Reviewed-by: Gautham R. Shenoy <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Mario Limonciello <[email protected]>
1 parent 2993b29 commit 16c977f

File tree

1 file changed

+60
-32
lines changed

1 file changed

+60
-32
lines changed

drivers/cpufreq/amd-pstate.c

Lines changed: 60 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -180,26 +180,40 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
180180
static DEFINE_MUTEX(amd_pstate_limits_lock);
181181
static DEFINE_MUTEX(amd_pstate_driver_lock);
182182

183-
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
183+
static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
184184
{
185185
u64 epp;
186186
int ret;
187187

188-
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
189-
if (!cppc_req_cached) {
190-
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
191-
&cppc_req_cached);
192-
if (epp)
193-
return epp;
194-
}
195-
epp = (cppc_req_cached >> 24) & 0xFF;
196-
} else {
197-
ret = cppc_get_epp_perf(cpudata->cpu, &epp);
188+
if (!cppc_req_cached) {
189+
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req_cached);
198190
if (ret < 0) {
199191
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
200-
return -EIO;
192+
return ret;
201193
}
202194
}
195+
epp = (cppc_req_cached >> 24) & 0xFF;
196+
197+
return (s16)epp;
198+
}
199+
200+
DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
201+
202+
static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
203+
{
204+
return static_call(amd_pstate_get_epp)(cpudata, cppc_req_cached);
205+
}
206+
207+
static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
208+
{
209+
u64 epp;
210+
int ret;
211+
212+
ret = cppc_get_epp_perf(cpudata->cpu, &epp);
213+
if (ret < 0) {
214+
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
215+
return ret;
216+
}
203217

204218
return (s16)(epp & 0xff);
205219
}
@@ -253,33 +267,45 @@ static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
253267
max_perf, fast_switch);
254268
}
255269

256-
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
270+
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
257271
{
258272
int ret;
259-
struct cppc_perf_ctrls perf_ctrls;
260-
261-
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
262-
u64 value = READ_ONCE(cpudata->cppc_req_cached);
263273

264-
value &= ~GENMASK_ULL(31, 24);
265-
value |= (u64)epp << 24;
266-
WRITE_ONCE(cpudata->cppc_req_cached, value);
274+
u64 value = READ_ONCE(cpudata->cppc_req_cached);
267275

268-
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
269-
if (!ret)
270-
cpudata->epp_cached = epp;
271-
} else {
272-
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
273-
cpudata->max_limit_perf, false);
276+
value &= ~GENMASK_ULL(31, 24);
277+
value |= (u64)epp << 24;
278+
WRITE_ONCE(cpudata->cppc_req_cached, value);
274279

275-
perf_ctrls.energy_perf = epp;
276-
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
277-
if (ret) {
278-
pr_debug("failed to set energy perf value (%d)\n", ret);
279-
return ret;
280-
}
280+
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
281+
if (!ret)
281282
cpudata->epp_cached = epp;
283+
284+
return ret;
285+
}
286+
287+
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
288+
289+
static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
290+
{
291+
return static_call(amd_pstate_set_epp)(cpudata, epp);
292+
}
293+
294+
static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
295+
{
296+
int ret;
297+
struct cppc_perf_ctrls perf_ctrls;
298+
299+
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
300+
cpudata->max_limit_perf, false);
301+
302+
perf_ctrls.energy_perf = epp;
303+
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
304+
if (ret) {
305+
pr_debug("failed to set energy perf value (%d)\n", ret);
306+
return ret;
282307
}
308+
cpudata->epp_cached = epp;
283309

284310
return ret;
285311
}
@@ -1869,6 +1895,8 @@ static int __init amd_pstate_init(void)
18691895
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
18701896
static_call_update(amd_pstate_init_perf, shmem_init_perf);
18711897
static_call_update(amd_pstate_update_perf, shmem_update_perf);
1898+
static_call_update(amd_pstate_get_epp, shmem_get_epp);
1899+
static_call_update(amd_pstate_set_epp, shmem_set_epp);
18721900
}
18731901

18741902
if (amd_pstate_prefcore) {

0 commit comments

Comments (0)